#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
#include <asm/processor.h>
#include "hwmgr.h"

static const struct cg_flag_name clocks[] = {
	{AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
	{AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"},
	{AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"},
	{AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"},
	{AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"},

	{AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
	{0, NULL},
};

static const struct hwmon_temp_label {
	enum PP_HWMON_TEMP channel;
	const char *label;
} temp_label[] = {
	{PP_TEMP_EDGE, "edge"},
	{PP_TEMP_JUNCTION, "junction"},
	{PP_TEMP_MEM, "mem"},
};

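/*
 * power_dpm_state
 *
 * Sysfs interface for the power state hint. Reading returns the current
 * state; writing accepts "battery", "balanced" or "performance" (see the
 * handlers below).
 */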
static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_pm_state_type pm;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (pp_funcs->get_current_power_state) {
		pm = amdgpu_dpm_get_current_power_state(adev);
	} else {
		pm = adev->pm.dpm.user_state;
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return sysfs_emit(buf, "%s\n",
			  (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			  (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type state;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	else
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);
	} else if (adev->powerplay.pp_funcs->dispatch_tasks) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);

		amdgpu_pm_compute_clocks(adev);
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

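/*
 * power_dpm_force_performance_level
 *
 * Sysfs interface for forcing a DPM performance level. Accepted values are
 * "auto", "low", "high", "manual", "profile_exit", "profile_standard",
 * "profile_min_sclk", "profile_min_mclk", "profile_peak" and
 * "perf_determinism" (see the store handler below).
 */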
static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
							     struct device_attribute *attr,
							     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_dpm_forced_level level = 0xff;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->get_performance_level)
		level = amdgpu_dpm_get_performance_level(adev);
	else
		level = adev->pm.dpm.forced_level;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return sysfs_emit(buf, "%s\n",
			  (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
			  (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
			  (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
			  (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
			  (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
			  "unknown");
}

static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
							     struct device_attribute *attr,
							     const char *buf,
							     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum amd_dpm_forced_level level;
	enum amd_dpm_forced_level current_level = 0xff;
	int ret = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_AUTO;
	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_MANUAL;
	} else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
	} else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
	} else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
	} else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
	} else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM;
	} else {
		return -EINVAL;
	}

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (pp_funcs->get_performance_level)
		current_level = amdgpu_dpm_get_performance_level(adev);

	if (current_level == level) {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return count;
	}

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
			       AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
			       AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
			       AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
		pr_err("Currently not in any profile mode!\n");
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return -EINVAL;
	}

	if (pp_funcs->force_performance_level) {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.dpm.thermal_active) {
			mutex_unlock(&adev->pm.mutex);
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret) {
			mutex_unlock(&adev->pm.mutex);
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		} else {
			adev->pm.dpm.forced_level = level;
		}
		mutex_unlock(&adev->pm.mutex);
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

static ssize_t amdgpu_get_pp_num_states(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct pp_states_info data;
	uint32_t i;
	int buf_len, ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (pp_funcs->get_pp_num_states) {
		amdgpu_dpm_get_pp_num_states(adev, &data);
	} else {
		memset(&data, 0, sizeof(data));
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
	for (i = 0; i < data.nums; i++)
		buf_len += sysfs_emit_at(buf, buf_len, "%d %s\n", i,
				(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
				(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
				(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
				(data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");

	return buf_len;
}

static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct pp_states_info data = {0};
	enum amd_pm_state_type pm = 0;
	int i = 0, ret = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (pp_funcs->get_current_power_state
		 && pp_funcs->get_pp_num_states) {
		pm = amdgpu_dpm_get_current_power_state(adev);
		amdgpu_dpm_get_pp_num_states(adev, &data);
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	for (i = 0; i < data.nums; i++) {
		if (pm == data.states[i])
			break;
	}

	if (i == data.nums)
		i = -EINVAL;

	return sysfs_emit(buf, "%d\n", i);
}

static ssize_t amdgpu_get_pp_force_state(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	if (adev->pp_force_state_enabled)
		return amdgpu_get_pp_cur_state(dev, attr, buf);
	else
		return sysfs_emit(buf, "\n");
}

static ssize_t amdgpu_set_pp_force_state(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type state = 0;
	unsigned long idx;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	if (strlen(buf) == 1)
		adev->pp_force_state_enabled = false;
	else if (is_support_sw_smu(adev))
		adev->pp_force_state_enabled = false;
	else if (adev->powerplay.pp_funcs->dispatch_tasks &&
		 adev->powerplay.pp_funcs->get_pp_num_states) {
		struct pp_states_info data;

		ret = kstrtoul(buf, 0, &idx);
		if (ret || idx >= ARRAY_SIZE(data.states))
			return -EINVAL;

		idx = array_index_nospec(idx, ARRAY_SIZE(data.states));

		amdgpu_dpm_get_pp_num_states(adev, &data);
		state = data.states[idx];

		ret = pm_runtime_get_sync(ddev->dev);
		if (ret < 0) {
			pm_runtime_put_autosuspend(ddev->dev);
			return ret;
		}

		if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
		    state != POWER_STATE_TYPE_DEFAULT) {
			amdgpu_dpm_dispatch_task(adev,
					AMD_PP_TASK_ENABLE_USER_STATE, &state);
			adev->pp_force_state_enabled = true;
		}
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	}

	return count;
}

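/*
 * pp_table
 *
 * Sysfs interface for reading and uploading the driver's powerplay table.
 * Reads are truncated to one page.
 */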
static ssize_t amdgpu_get_pp_table(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	char *table = NULL;
	int size, ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->get_pp_table) {
		size = amdgpu_dpm_get_pp_table(adev, &table);
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		if (size < 0)
			return size;
	} else {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return 0;
	}

	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	memcpy(buf, table, size);

	return size;
}

static ssize_t amdgpu_set_pp_table(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf,
				   size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	ret = amdgpu_dpm_set_pp_table(adev, buf, count);
	if (ret) {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

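/*
 * pp_od_clk_voltage
 *
 * Sysfs interface for overclock/overdrive control. The store handler below
 * parses commands whose first token is 's', 'p', 'm', 'r', 'c', "vc" or "vo"
 * plus optional numeric arguments, and forwards them to the fine-grain
 * clock/voltage or OD table callbacks; "c" commits the pending table.
 */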
static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	uint32_t parameter_size = 0;
	long parameter[64];
	char buf_cpy[128];
	char *tmp_str;
	char *sub_str;
	const char delimiter[3] = {' ', '\n', '\0'};
	uint32_t type;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	if (count > 127)
		return -EINVAL;

	if (*buf == 's')
		type = PP_OD_EDIT_SCLK_VDDC_TABLE;
	else if (*buf == 'p')
		type = PP_OD_EDIT_CCLK_VDDC_TABLE;
	else if (*buf == 'm')
		type = PP_OD_EDIT_MCLK_VDDC_TABLE;
	else if (*buf == 'r')
		type = PP_OD_RESTORE_DEFAULT_TABLE;
	else if (*buf == 'c')
		type = PP_OD_COMMIT_DPM_TABLE;
	else if (!strncmp(buf, "vc", 2))
		type = PP_OD_EDIT_VDDC_CURVE;
	else if (!strncmp(buf, "vo", 2))
		type = PP_OD_EDIT_VDDGFX_OFFSET;
	else
		return -EINVAL;

	memcpy(buf_cpy, buf, count+1);

	tmp_str = buf_cpy;

	if ((type == PP_OD_EDIT_VDDC_CURVE) ||
	    (type == PP_OD_EDIT_VDDGFX_OFFSET))
		tmp_str++;
	while (isspace(*++tmp_str));

	while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
		if (strlen(sub_str) == 0)
			continue;
		ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
		if (ret)
			return -EINVAL;
		parameter_size++;

		while (isspace(*tmp_str))
			tmp_str++;
	}

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->set_fine_grain_clk_vol) {
		ret = amdgpu_dpm_set_fine_grain_clk_vol(adev, type,
							parameter,
							parameter_size);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	}

	if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
		ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
						    parameter, parameter_size);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	}

	if (type == PP_OD_COMMIT_DPM_TABLE) {
		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			amdgpu_dpm_dispatch_task(adev,
						 AMD_PP_TASK_READJUST_POWER_STATE,
						 NULL);
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return count;
		} else {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->print_clock_levels) {
		size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
		size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf+size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf+size);
	} else {
		size = sysfs_emit(buf, "\n");
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

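/*
 * pp_features
 *
 * Sysfs interface for the powerplay feature mask. Writing a 64-bit mask
 * enables or disables the corresponding ppfeatures; reading dumps the
 * current feature status.
 */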
static ssize_t amdgpu_set_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t featuremask;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = kstrtou64(buf, 0, &featuremask);
	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->set_ppfeature_status) {
		ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

static ssize_t amdgpu_get_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->get_ppfeature_status)
		size = amdgpu_dpm_get_ppfeature_status(adev, buf);
	else
		size = sysfs_emit(buf, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

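/*
 * pp_dpm_sclk / pp_dpm_mclk / pp_dpm_socclk / pp_dpm_fclk / pp_dpm_vclk /
 * pp_dpm_dclk / pp_dpm_dcefclk / pp_dpm_pcie
 *
 * Sysfs interfaces for the per-clock DPM level tables. Reading prints the
 * available levels; writing a space-separated list of level indices forces
 * the corresponding mask (see amdgpu_read_mask() below).
 */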
static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
				       enum pp_clock_type type,
				       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, type, buf);
	else
		size = sysfs_emit(buf, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

#define AMDGPU_MASK_BUF_MAX	(32 * 13)

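/*
 * Parse a space-separated list of level indices (0..31) from a sysfs write
 * and turn it into a bitmask.
 */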
static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
{
	int ret;
	unsigned long level;
	char *sub_str = NULL;
	char *tmp;
	char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
	const char delimiter[3] = {' ', '\n', '\0'};
	size_t bytes;

	*mask = 0;

	bytes = min(count, sizeof(buf_cpy) - 1);
	memcpy(buf_cpy, buf, bytes);
	buf_cpy[bytes] = '\0';
	tmp = buf_cpy;
	while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
		if (strlen(sub_str)) {
			ret = kstrtoul(sub_str, 0, &level);
			if (ret || level > 31)
				return -EINVAL;
			*mask |= 1 << level;
		} else
			break;
	}

	return 0;
}

static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
				       enum pp_clock_type type,
				       const char *buf,
				       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	uint32_t mask = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, type, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_SCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_SCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_MCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_MCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_SOCCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
					struct device_attribute *attr,
					const char *buf,
					size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_SOCCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_FCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_FCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_VCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_DCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_DCEFCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_DCEFCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_PCIE, buf);
}

static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_PCIE, buf, count);
}

static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t value = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		value = 0;
	else if (adev->powerplay.pp_funcs->get_sclk_od)
		value = amdgpu_dpm_get_sclk_od(adev);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return sysfs_emit(buf, "%d\n", value);
}

static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	long int value;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = kstrtol(buf, 0, &value);

	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		value = 0;
	} else {
		if (adev->powerplay.pp_funcs->set_sclk_od)
			amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);

		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
		} else {
			adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
			amdgpu_pm_compute_clocks(adev);
		}
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t value = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		value = 0;
	else if (adev->powerplay.pp_funcs->get_mclk_od)
		value = amdgpu_dpm_get_mclk_od(adev);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return sysfs_emit(buf, "%d\n", value);
}

static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	long int value;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = kstrtol(buf, 0, &value);

	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		value = 0;
	} else {
		if (adev->powerplay.pp_funcs->set_mclk_od)
			amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);

		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
		} else {
			adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
			amdgpu_pm_compute_clocks(adev);
		}
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

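/*
 * pp_power_profile_mode
 *
 * Sysfs interface for the power profile mode. Reading lists the available
 * modes; writing selects a mode by index and, for the custom mode, accepts
 * additional space-separated parameters.
 */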
static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->get_power_profile_mode)
		size = amdgpu_dpm_get_power_profile_mode(adev, buf);
	else
		size = sysfs_emit(buf, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t count)
{
	int ret;
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t parameter_size = 0;
	long parameter[64];
	char *sub_str, buf_cpy[128];
	char *tmp_str;
	uint32_t i = 0;
	char tmp[2];
	long int profile_mode = 0;
	const char delimiter[3] = {' ', '\n', '\0'};

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	tmp[0] = *(buf);
	tmp[1] = '\0';
	ret = kstrtol(tmp, 0, &profile_mode);
	if (ret)
		return -EINVAL;

	if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
		if (count < 2 || count > 127)
			return -EINVAL;
		while (isspace(*++buf))
			i++;
		memcpy(buf_cpy, buf, count-i);
		tmp_str = buf_cpy;
		while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
			if (strlen(sub_str) == 0)
				continue;
			ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
			if (ret)
				return -EINVAL;
			parameter_size++;
			while (isspace(*tmp_str))
				tmp_str++;
		}
	}
	parameter[parameter_size] = profile_mode;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->set_power_profile_mode)
		ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (!ret)
		return count;

	return -EINVAL;
}

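/*
 * gpu_busy_percent
 *
 * Sysfs interface reporting GPU load in percent, read via the
 * AMDGPU_PP_SENSOR_GPU_LOAD sensor.
 */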
static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int r, value, size = sizeof(value);

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return r;
	}

	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
				   (void *)&value, &size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (r)
		return r;

	return sysfs_emit(buf, "%d\n", value);
}

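/*
 * mem_busy_percent
 *
 * Sysfs interface reporting memory controller load in percent, read via the
 * AMDGPU_PP_SENSOR_MEM_LOAD sensor.
 */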
static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int r, value, size = sizeof(value);

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return r;
	}

	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
				   (void *)&value, &size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (r)
		return r;

	return sysfs_emit(buf, "%d\n", value);
}

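/*
 * pcie_bw
 *
 * Sysfs interface reporting two PCIe usage counters and the link's maximum
 * payload size ("<count0> <count1> <mps>"). Returns -ENODATA on APUs or when
 * the ASIC has no get_pcie_usage callback.
 */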
static ssize_t amdgpu_get_pcie_bw(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t count0 = 0, count1 = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	if (adev->flags & AMD_IS_APU)
		return -ENODATA;

	if (!adev->asic_funcs->get_pcie_usage)
		return -ENODATA;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	amdgpu_asic_get_pcie_usage(adev, &count0, &count1);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return sysfs_emit(buf, "%llu %llu %i\n",
			  count0, count1, pcie_get_mps(adev->pdev));
}

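/*
 * unique_id
 *
 * Sysfs interface exposing the GPU's unique id, when one is available.
 */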
static ssize_t amdgpu_get_unique_id(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	if (adev->unique_id)
		return sysfs_emit(buf, "%016llx\n", adev->unique_id);

	return 0;
}

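/*
 * thermal_throttling_logging
 *
 * Sysfs interface controlling thermal throttling log output. Writing 0
 * disables logging; writing N (1..3600) enables it with roughly an N-second
 * rate-limit interval.
 */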
static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
						     struct device_attribute *attr,
						     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
			  adev_to_drm(adev)->unique,
			  atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
			  adev->throttling_logging_rs.interval / HZ + 1);
}

static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
						     struct device_attribute *attr,
						     const char *buf,
						     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	long throttling_logging_interval;
	unsigned long flags;
	int ret = 0;

	ret = kstrtol(buf, 0, &throttling_logging_interval);
	if (ret)
		return ret;

	if (throttling_logging_interval > 3600)
		return -EINVAL;

	if (throttling_logging_interval > 0) {
		raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
		adev->throttling_logging_rs.interval =
			(throttling_logging_interval - 1) * HZ;
		adev->throttling_logging_rs.begin = 0;
		adev->throttling_logging_rs.printed = 0;
		adev->throttling_logging_rs.missed = 0;
		raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);

		atomic_set(&adev->throttling_logging_enabled, 1);
	} else {
		atomic_set(&adev->throttling_logging_enabled, 0);
	}

	return count;
}

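/*
 * gpu_metrics
 *
 * Sysfs interface dumping the binary gpu_metrics table provided by the
 * powerplay backend (truncated to one page).
 */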
static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	void *gpu_metrics;
	ssize_t size = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (adev->powerplay.pp_funcs->get_gpu_metrics)
		size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);

	if (size <= 0)
		goto out;

	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	memcpy(buf, gpu_metrics, size);

out:
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

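/*
 * smartshift_apu_power / smartshift_dgpu_power / smartshift_bias
 *
 * Sysfs interfaces for SmartShift: the APU and dGPU power share (in percent)
 * read from the SS share sensors, and a user-set bias clamped between
 * AMDGPU_SMARTSHIFT_MIN_BIAS and AMDGPU_SMARTSHIFT_MAX_BIAS.
 */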
static ssize_t amdgpu_get_smartshift_apu_power(struct device *dev, struct device_attribute *attr,
					       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t ss_power, size;
	int r = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return r;
	}

	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
				   (void *)&ss_power, &size);
	if (r)
		goto out;

	r = sysfs_emit(buf, "%u%%\n", ss_power);

out:
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
	return r;
}

static ssize_t amdgpu_get_smartshift_dgpu_power(struct device *dev, struct device_attribute *attr,
						char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t ss_power, size;
	int r = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return r;
	}

	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
				   (void *)&ss_power, &size);

	if (r)
		goto out;

	r = sysfs_emit(buf, "%u%%\n", ss_power);

out:
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
	return r;
}

static ssize_t amdgpu_get_smartshift_bias(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	int r = 0;

	r = sysfs_emit(buf, "%d\n", amdgpu_smartshift_bias);

	return r;
}

static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int r = 0;
	int bias = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return r;
	}

	r = kstrtoint(buf, 10, &bias);
	if (r)
		goto out;

	if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
		bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
	else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
		bias = AMDGPU_SMARTSHIFT_MIN_BIAS;

	amdgpu_smartshift_bias = bias;
	r = count;

out:
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
	return r;
}

static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
				uint32_t mask, enum amdgpu_device_attr_states *states)
{
	uint32_t ss_power, size;

	if (!amdgpu_acpi_is_power_shift_control_supported())
		*states = ATTR_STATE_UNSUPPORTED;
	else if ((adev->flags & AMD_IS_PX) &&
		 !amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
		*states = ATTR_STATE_UNSUPPORTED;
	else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
					(void *)&ss_power, &size))
		*states = ATTR_STATE_UNSUPPORTED;
	else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
					(void *)&ss_power, &size))
		*states = ATTR_STATE_UNSUPPORTED;

	return 0;
}

static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
			       uint32_t mask, enum amdgpu_device_attr_states *states)
{
	uint32_t ss_power, size;

	if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
		*states = ATTR_STATE_UNSUPPORTED;
	else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
					(void *)&ss_power, &size))
		*states = ATTR_STATE_UNSUPPORTED;
	else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
					(void *)&ss_power, &size))
		*states = ATTR_STATE_UNSUPPORTED;

	return 0;
}

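/*
 * Device sysfs attributes exposed by the power-management code. Each entry
 * is filtered per-ASIC at creation time by its attr_update callback
 * (default_attr_update() unless overridden below).
 */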
static struct amdgpu_device_attr amdgpu_device_attrs[] = {
	AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power, ATTR_FLAG_BASIC,
			      .attr_update = ss_power_attr_update),
	AMDGPU_DEVICE_ATTR_RO(smartshift_dgpu_power, ATTR_FLAG_BASIC,
			      .attr_update = ss_power_attr_update),
	AMDGPU_DEVICE_ATTR_RW(smartshift_bias, ATTR_FLAG_BASIC,
			      .attr_update = ss_bias_attr_update),
};

static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
			       uint32_t mask, enum amdgpu_device_attr_states *states)
{
	struct device_attribute *dev_attr = &attr->dev_attr;
	const char *attr_name = dev_attr->attr.name;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	enum amd_asic_type asic_type = adev->asic_type;

	if (!(attr->flags & mask)) {
		*states = ATTR_STATE_UNSUPPORTED;
		return 0;
	}

#define DEVICE_ATTR_IS(_name)	(!strcmp(attr_name, #_name))

	if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
		if (asic_type < CHIP_VEGA10)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
		if (asic_type < CHIP_VEGA10 ||
		    asic_type == CHIP_ARCTURUS ||
		    asic_type == CHIP_ALDEBARAN)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
		if (asic_type < CHIP_VEGA20)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
		*states = ATTR_STATE_UNSUPPORTED;
		if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
		    (is_support_sw_smu(adev) && adev->smu.is_apu) ||
		    (!is_support_sw_smu(adev) && hwmgr->od_enabled))
			*states = ATTR_STATE_SUPPORTED;
	} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
		if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pcie_bw)) {
		if (adev->flags & AMD_IS_APU)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(unique_id)) {
		if (asic_type != CHIP_VEGA10 &&
		    asic_type != CHIP_VEGA20 &&
		    asic_type != CHIP_ARCTURUS)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_features)) {
		if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(gpu_metrics)) {
		if (asic_type < CHIP_VEGA12)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
		if (!(asic_type == CHIP_VANGOGH))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
		if (!(asic_type == CHIP_VANGOGH))
			*states = ATTR_STATE_UNSUPPORTED;
	}

	switch (asic_type) {
	case CHIP_ARCTURUS:
	case CHIP_ALDEBARAN:
		if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
		    DEVICE_ATTR_IS(pp_dpm_socclk) ||
		    DEVICE_ATTR_IS(pp_dpm_fclk)) {
			dev_attr->attr.mode &= ~S_IWUGO;
			dev_attr->store = NULL;
		}
		break;
	default:
		break;
	}

	if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
		if (asic_type >= CHIP_NAVI10) {
			dev_attr->attr.mode &= ~S_IWUGO;
			dev_attr->store = NULL;
		}
	}

#undef DEVICE_ATTR_IS

	return 0;
}

static int amdgpu_device_attr_create(struct amdgpu_device *adev,
				     struct amdgpu_device_attr *attr,
				     uint32_t mask, struct list_head *attr_list)
{
	int ret = 0;
	struct device_attribute *dev_attr = &attr->dev_attr;
	const char *name = dev_attr->attr.name;
	enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
	struct amdgpu_device_attr_entry *attr_entry;

	int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
			   uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;

	BUG_ON(!attr);

	attr_update = attr->attr_update ? attr->attr_update : default_attr_update;

	ret = attr_update(adev, attr, mask, &attr_states);
	if (ret) {
		dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
			name, ret);
		return ret;
	}

	if (attr_states == ATTR_STATE_UNSUPPORTED)
		return 0;

	ret = device_create_file(adev->dev, dev_attr);
	if (ret) {
		dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
			name, ret);
	}

	attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
	if (!attr_entry)
		return -ENOMEM;

	attr_entry->attr = attr;
	INIT_LIST_HEAD(&attr_entry->entry);

	list_add_tail(&attr_entry->entry, attr_list);

	return ret;
}

static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
{
	struct device_attribute *dev_attr = &attr->dev_attr;

	device_remove_file(adev->dev, dev_attr);
}

static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
					     struct list_head *attr_list);

static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
					    struct amdgpu_device_attr *attrs,
					    uint32_t counts,
					    uint32_t mask,
					    struct list_head *attr_list)
{
	int ret = 0;
	uint32_t i = 0;

	for (i = 0; i < counts; i++) {
		ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
		if (ret)
			goto failed;
	}

	return 0;

failed:
	amdgpu_device_attr_remove_groups(adev, attr_list);

	return ret;
}

static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
					     struct list_head *attr_list)
{
	struct amdgpu_device_attr_entry *entry, *entry_tmp;

	if (list_empty(attr_list))
		return;

	list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
		amdgpu_device_attr_remove(adev, entry->attr);
		list_del(&entry->entry);
		kfree(entry);
	}
}

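/*
 * hwmon temperature handlers: the show functions below report the edge,
 * junction and memory channel temperatures (see temp_label[] above) along
 * with their thresholds, labels and emergency limits.
 */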
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int channel = to_sensor_dev_attr(attr)->index;
	int r, temp = 0, size = sizeof(temp);

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	if (channel >= PP_TEMP_MAX)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	switch (channel) {
	case PP_TEMP_JUNCTION:
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
					   (void *)&temp, &size);
		break;
	case PP_TEMP_EDGE:
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
					   (void *)&temp, &size);
		break;
	case PP_TEMP_MEM:
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
					   (void *)&temp, &size);
		break;
	default:
		r = -EINVAL;
		break;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (r)
		return r;

	return sysfs_emit(buf, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_temp;
	else
		temp = adev->pm.dpm.thermal.max_temp;

	return sysfs_emit(buf, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
						     struct device_attribute *attr,
						     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_hotspot_temp;
	else
		temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;

	return sysfs_emit(buf, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
						 struct device_attribute *attr,
						 char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_mem_temp;
	else
		temp = adev->pm.dpm.thermal.max_mem_crit_temp;

	return sysfs_emit(buf, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	int channel = to_sensor_dev_attr(attr)->index;

	if (channel >= PP_TEMP_MAX)
		return -EINVAL;

	return sysfs_emit(buf, "%s\n", temp_label[channel].label);
}

static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int channel = to_sensor_dev_attr(attr)->index;
	int temp = 0;

	if (channel >= PP_TEMP_MAX)
		return -EINVAL;

	switch (channel) {
	case PP_TEMP_JUNCTION:
		temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
		break;
	case PP_TEMP_EDGE:
		temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
		break;
	case PP_TEMP_MEM:
		temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
		break;
	}

	return sysfs_emit(buf, "%d\n", temp);
}

2357static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
2358 struct device_attribute *attr,
2359 char *buf)
2360{
2361 struct amdgpu_device *adev = dev_get_drvdata(dev);
2362 u32 pwm_mode = 0;
2363 int ret;
2364
2365 if (amdgpu_in_reset(adev))
2366 return -EPERM;
2367 if (adev->in_suspend && !adev->in_runpm)
2368 return -EPERM;
2369
2370 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2371 if (ret < 0) {
2372 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2373 return ret;
2374 }
2375
2376 if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
2377 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2378 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2379 return -EINVAL;
2380 }
2381
2382 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2383
2384 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2385 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2386
2387 return sysfs_emit(buf, "%u\n", pwm_mode);
2388}
2389
2390static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
2391 struct device_attribute *attr,
2392 const char *buf,
2393 size_t count)
2394{
2395 struct amdgpu_device *adev = dev_get_drvdata(dev);
2396 int err, ret;
2397 int value;
2398
2399 if (amdgpu_in_reset(adev))
2400 return -EPERM;
2401 if (adev->in_suspend && !adev->in_runpm)
2402 return -EPERM;
2403
2404 err = kstrtoint(buf, 10, &value);
2405 if (err)
2406 return err;
2407
2408 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2409 if (ret < 0) {
2410 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2411 return ret;
2412 }
2413
2414 if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
2415 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2416 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2417 return -EINVAL;
2418 }
2419
2420 amdgpu_dpm_set_fan_control_mode(adev, value);
2421
2422 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2423 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2424
2425 return count;
2426}
2427
2428static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
2429 struct device_attribute *attr,
2430 char *buf)
2431{
2432 return sysfs_emit(buf, "%i\n", 0);
2433}
2434
2435static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
2436 struct device_attribute *attr,
2437 char *buf)
2438{
2439 return sysfs_emit(buf, "%i\n", 255);
2440}
2441
2442static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
2443 struct device_attribute *attr,
2444 const char *buf, size_t count)
2445{
2446 struct amdgpu_device *adev = dev_get_drvdata(dev);
2447 int err;
2448 u32 value;
2449 u32 pwm_mode;
2450
2451 if (amdgpu_in_reset(adev))
2452 return -EPERM;
2453 if (adev->in_suspend && !adev->in_runpm)
2454 return -EPERM;
2455
2456 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2457 if (err < 0) {
2458 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2459 return err;
2460 }
2461
2462 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2463 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2464 pr_info("manual fan speed control should be enabled first\n");
2465 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2466 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2467 return -EINVAL;
2468 }
2469
2470 err = kstrtou32(buf, 10, &value);
2471 if (err) {
2472 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2473 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2474 return err;
2475 }
2476
2477 if (adev->powerplay.pp_funcs->set_fan_speed_pwm)
2478 err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
2479 else
2480 err = -EINVAL;
2481
2482 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2483 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2484
2485 if (err)
2486 return err;
2487
2488 return count;
2489}
2490
2491static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
2492 struct device_attribute *attr,
2493 char *buf)
2494{
2495 struct amdgpu_device *adev = dev_get_drvdata(dev);
2496 int err;
2497 u32 speed = 0;
2498
2499 if (amdgpu_in_reset(adev))
2500 return -EPERM;
2501 if (adev->in_suspend && !adev->in_runpm)
2502 return -EPERM;
2503
2504 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2505 if (err < 0) {
2506 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2507 return err;
2508 }
2509
2510 if (adev->powerplay.pp_funcs->get_fan_speed_pwm)
2511 err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
2512 else
2513 err = -EINVAL;
2514
2515 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2516 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2517
2518 if (err)
2519 return err;
2520
2521 return sysfs_emit(buf, "%i\n", speed);
2522}
2523
2524static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
2525 struct device_attribute *attr,
2526 char *buf)
2527{
2528 struct amdgpu_device *adev = dev_get_drvdata(dev);
2529 int err;
2530 u32 speed = 0;
2531
2532 if (amdgpu_in_reset(adev))
2533 return -EPERM;
2534 if (adev->in_suspend && !adev->in_runpm)
2535 return -EPERM;
2536
2537 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2538 if (err < 0) {
2539 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2540 return err;
2541 }
2542
2543 if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
2544 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
2545 else
2546 err = -EINVAL;
2547
2548 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2549 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2550
2551 if (err)
2552 return err;
2553
2554 return sysfs_emit(buf, "%i\n", speed);
2555}
2556
2557static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
2558 struct device_attribute *attr,
2559 char *buf)
2560{
2561 struct amdgpu_device *adev = dev_get_drvdata(dev);
2562 u32 min_rpm = 0;
2563 u32 size = sizeof(min_rpm);
2564 int r;
2565
2566 if (amdgpu_in_reset(adev))
2567 return -EPERM;
2568 if (adev->in_suspend && !adev->in_runpm)
2569 return -EPERM;
2570
2571 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2572 if (r < 0) {
2573 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2574 return r;
2575 }
2576
2577 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
2578 (void *)&min_rpm, &size);
2579
2580 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2581 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2582
2583 if (r)
2584 return r;
2585
2586 return sysfs_emit(buf, "%d\n", min_rpm);
2587}
2588
2589static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
2590 struct device_attribute *attr,
2591 char *buf)
2592{
2593 struct amdgpu_device *adev = dev_get_drvdata(dev);
2594 u32 max_rpm = 0;
2595 u32 size = sizeof(max_rpm);
2596 int r;
2597
2598 if (amdgpu_in_reset(adev))
2599 return -EPERM;
2600 if (adev->in_suspend && !adev->in_runpm)
2601 return -EPERM;
2602
2603 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2604 if (r < 0) {
2605 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2606 return r;
2607 }
2608
2609 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
2610 (void *)&max_rpm, &size);
2611
2612 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2613 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2614
2615 if (r)
2616 return r;
2617
2618 return sysfs_emit(buf, "%d\n", max_rpm);
2619}
2620
2621static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
2622 struct device_attribute *attr,
2623 char *buf)
2624{
2625 struct amdgpu_device *adev = dev_get_drvdata(dev);
2626 int err;
2627 u32 rpm = 0;
2628
2629 if (amdgpu_in_reset(adev))
2630 return -EPERM;
2631 if (adev->in_suspend && !adev->in_runpm)
2632 return -EPERM;
2633
2634 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2635 if (err < 0) {
2636 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2637 return err;
2638 }
2639
2640 if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
2641 err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
2642 else
2643 err = -EINVAL;
2644
2645 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2646 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2647
2648 if (err)
2649 return err;
2650
2651 return sysfs_emit(buf, "%i\n", rpm);
2652}
2653
2654static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
2655 struct device_attribute *attr,
2656 const char *buf, size_t count)
2657{
2658 struct amdgpu_device *adev = dev_get_drvdata(dev);
2659 int err;
2660 u32 value;
2661 u32 pwm_mode;
2662
2663 if (amdgpu_in_reset(adev))
2664 return -EPERM;
2665 if (adev->in_suspend && !adev->in_runpm)
2666 return -EPERM;
2667
2668 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2669 if (err < 0) {
2670 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2671 return err;
2672 }
2673
2674 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2675
2676 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2677 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2678 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2679 return -ENODATA;
2680 }
2681
2682 err = kstrtou32(buf, 10, &value);
2683 if (err) {
2684 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2685 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2686 return err;
2687 }
2688
2689 if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
2690 err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
2691 else
2692 err = -EINVAL;
2693
2694 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2695 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2696
2697 if (err)
2698 return err;
2699
2700 return count;
2701}
2702
2703static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
2704 struct device_attribute *attr,
2705 char *buf)
2706{
2707 struct amdgpu_device *adev = dev_get_drvdata(dev);
2708 u32 pwm_mode = 0;
2709 int ret;
2710
2711 if (amdgpu_in_reset(adev))
2712 return -EPERM;
2713 if (adev->in_suspend && !adev->in_runpm)
2714 return -EPERM;
2715
2716 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2717 if (ret < 0) {
2718 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2719 return ret;
2720 }
2721
2722 if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
2723 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2724 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2725 return -EINVAL;
2726 }
2727
2728 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2729
2730 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2731 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2732
2733 return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
2734}
2735
2736static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
2737 struct device_attribute *attr,
2738 const char *buf,
2739 size_t count)
2740{
2741 struct amdgpu_device *adev = dev_get_drvdata(dev);
2742 int err;
2743 int value;
2744 u32 pwm_mode;
2745
2746 if (amdgpu_in_reset(adev))
2747 return -EPERM;
2748 if (adev->in_suspend && !adev->in_runpm)
2749 return -EPERM;
2750
2751 err = kstrtoint(buf, 10, &value);
2752 if (err)
2753 return err;
2754
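	/* 0 requests automatic fan control, 1 requests manual control; other values are rejected */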
2755 if (value == 0)
2756 pwm_mode = AMD_FAN_CTRL_AUTO;
2757 else if (value == 1)
2758 pwm_mode = AMD_FAN_CTRL_MANUAL;
2759 else
2760 return -EINVAL;
2761
2762 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2763 if (err < 0) {
2764 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2765 return err;
2766 }
2767
2768 if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
2769 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2770 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2771 return -EINVAL;
2772 }
2773 amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
2774
2775 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2776 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2777
2778 return count;
2779}
2780
2781static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
2782 struct device_attribute *attr,
2783 char *buf)
2784{
2785 struct amdgpu_device *adev = dev_get_drvdata(dev);
2786 u32 vddgfx;
2787 int r, size = sizeof(vddgfx);
2788
2789 if (amdgpu_in_reset(adev))
2790 return -EPERM;
2791 if (adev->in_suspend && !adev->in_runpm)
2792 return -EPERM;
2793
2794 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2795 if (r < 0) {
2796 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2797 return r;
2798 }
2799
	/* get the voltage */
2801 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
2802 (void *)&vddgfx, &size);
2803
2804 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2805 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2806
2807 if (r)
2808 return r;
2809
2810 return sysfs_emit(buf, "%d\n", vddgfx);
2811}
2812
2813static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
2814 struct device_attribute *attr,
2815 char *buf)
2816{
2817 return sysfs_emit(buf, "vddgfx\n");
2818}
2819
2820static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
2821 struct device_attribute *attr,
2822 char *buf)
2823{
2824 struct amdgpu_device *adev = dev_get_drvdata(dev);
2825 u32 vddnb;
2826 int r, size = sizeof(vddnb);
2827
2828 if (amdgpu_in_reset(adev))
2829 return -EPERM;
2830 if (adev->in_suspend && !adev->in_runpm)
2831 return -EPERM;
2832
	/* only APUs have vddnb */
2834 if (!(adev->flags & AMD_IS_APU))
2835 return -EINVAL;
2836
2837 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2838 if (r < 0) {
2839 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2840 return r;
2841 }
2842
	/* get the voltage */
2844 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
2845 (void *)&vddnb, &size);
2846
2847 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2848 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2849
2850 if (r)
2851 return r;
2852
2853 return sysfs_emit(buf, "%d\n", vddnb);
2854}
2855
2856static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
2857 struct device_attribute *attr,
2858 char *buf)
2859{
2860 return sysfs_emit(buf, "vddnb\n");
2861}
2862
2863static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
2864 struct device_attribute *attr,
2865 char *buf)
2866{
2867 struct amdgpu_device *adev = dev_get_drvdata(dev);
2868 u32 query = 0;
2869 int r, size = sizeof(u32);
	unsigned int uw;
2871
2872 if (amdgpu_in_reset(adev))
2873 return -EPERM;
2874 if (adev->in_suspend && !adev->in_runpm)
2875 return -EPERM;
2876
2877 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2878 if (r < 0) {
2879 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2880 return r;
2881 }
2882
	/* get the average GPU power */
2884 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
2885 (void *)&query, &size);
2886
2887 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2888 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2889
2890 if (r)
2891 return r;
2892
2893
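	/* convert to microwatts */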
2894 uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
2895
2896 return sysfs_emit(buf, "%u\n", uw);
2897}
2898
2899static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
2900 struct device_attribute *attr,
2901 char *buf)
2902{
2903 return sysfs_emit(buf, "%i\n", 0);
2904}
2905
2906
2907static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
2908 struct device_attribute *attr,
2909 char *buf,
2910 enum pp_power_limit_level pp_limit_level)
2911{
2912 struct amdgpu_device *adev = dev_get_drvdata(dev);
2913 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
2914 enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
2915 uint32_t limit;
2916 ssize_t size;
2917 int r;
2918
2919 if (amdgpu_in_reset(adev))
2920 return -EPERM;
2921 if (adev->in_suspend && !adev->in_runpm)
2922 return -EPERM;
2923
	if (!(pp_funcs && pp_funcs->get_power_limit))
2925 return -ENODATA;
2926
2927 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2928 if (r < 0) {
2929 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2930 return r;
2931 }
2932
2933 r = pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit,
2934 pp_limit_level, power_type);
2935
2936 if (!r)
2937 size = sysfs_emit(buf, "%u\n", limit * 1000000);
2938 else
2939 size = sysfs_emit(buf, "\n");
2940
2941 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2942 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2943
2944 return size;
2945}
2946
2947
2948static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
2949 struct device_attribute *attr,
2950 char *buf)
2951{
	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MAX);
}
2955
2956static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
2957 struct device_attribute *attr,
2958 char *buf)
2959{
	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_CURRENT);
}
2963
2964static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
2965 struct device_attribute *attr,
2966 char *buf)
2967{
	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_DEFAULT);
}
2971
2972static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
2973 struct device_attribute *attr,
2974 char *buf)
2975{
2976 int limit_type = to_sensor_dev_attr(attr)->index;
2977
2978 return sysfs_emit(buf, "%s\n",
2979 limit_type == SMU_FAST_PPT_LIMIT ? "fastPPT" : "slowPPT");
2980}
2981
2982static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
2983 struct device_attribute *attr,
2984 const char *buf,
2985 size_t count)
2986{
2987 struct amdgpu_device *adev = dev_get_drvdata(dev);
2988 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
2989 int limit_type = to_sensor_dev_attr(attr)->index;
2990 int err;
2991 u32 value;
2992
2993 if (amdgpu_in_reset(adev))
2994 return -EPERM;
2995 if (adev->in_suspend && !adev->in_runpm)
2996 return -EPERM;
2997
2998 if (amdgpu_sriov_vf(adev))
2999 return -EINVAL;
3000
3001 err = kstrtou32(buf, 10, &value);
3002 if (err)
3003 return err;
3004
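	/* sysfs writes are in microwatts; the backend expects watts, with the limit type encoded in the top byte */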
3005 value = value / 1000000;
3006 value |= limit_type << 24;
3007
3008 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
3009 if (err < 0) {
3010 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3011 return err;
3012 }
3013
3014 if (pp_funcs && pp_funcs->set_power_limit)
3015 err = pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
3016 else
3017 err = -EINVAL;
3018
3019 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3020 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3021
3022 if (err)
3023 return err;
3024
3025 return count;
3026}
3027
3028static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
3029 struct device_attribute *attr,
3030 char *buf)
3031{
3032 struct amdgpu_device *adev = dev_get_drvdata(dev);
3033 uint32_t sclk;
3034 int r, size = sizeof(sclk);
3035
3036 if (amdgpu_in_reset(adev))
3037 return -EPERM;
3038 if (adev->in_suspend && !adev->in_runpm)
3039 return -EPERM;
3040
3041 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
3042 if (r < 0) {
3043 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3044 return r;
3045 }
3046
	/* get the sclk */
3048 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
3049 (void *)&sclk, &size);
3050
3051 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3052 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3053
3054 if (r)
3055 return r;
3056
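	/* the sensor reports the clock in 10 kHz units; convert to Hz for hwmon */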
3057 return sysfs_emit(buf, "%u\n", sclk * 10 * 1000);
3058}
3059
3060static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
3061 struct device_attribute *attr,
3062 char *buf)
3063{
3064 return sysfs_emit(buf, "sclk\n");
3065}
3066
3067static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
3068 struct device_attribute *attr,
3069 char *buf)
3070{
3071 struct amdgpu_device *adev = dev_get_drvdata(dev);
3072 uint32_t mclk;
3073 int r, size = sizeof(mclk);
3074
3075 if (amdgpu_in_reset(adev))
3076 return -EPERM;
3077 if (adev->in_suspend && !adev->in_runpm)
3078 return -EPERM;
3079
3080 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
3081 if (r < 0) {
3082 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3083 return r;
3084 }
3085
	/* get the mclk */
3087 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
3088 (void *)&mclk, &size);
3089
3090 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3091 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3092
3093 if (r)
3094 return r;
3095
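	/* the sensor reports the clock in 10 kHz units; convert to Hz for hwmon */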
3096 return sysfs_emit(buf, "%u\n", mclk * 10 * 1000);
3097}
3098
3099static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
3100 struct device_attribute *attr,
3101 char *buf)
3102{
3103 return sysfs_emit(buf, "mclk\n");
3104}
3105
/*
 * hwmon interfaces for GPU temperature:
 *
 * - temp[1-3]_input: the on die GPU temperature in millidegrees Celsius
 *   - temp1 reports the edge temperature, temp2 the junction (hotspot)
 *     temperature and temp3 the memory temperature
 *   - temp2_input and temp3_input are supported on SOC15 dGPUs only
 *
 * - temp[1-3]_label: temperature channel label
 *   - temp2_label and temp3_label are supported on SOC15 dGPUs only
 *
 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
 *   - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
 *
 * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
 *   - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
 *
 * - temp[1-3]_emergency: temperature emergency max value (ASIC shutdown) in millidegrees Celsius
 *   - supported on SOC15 dGPUs only
 *
 * hwmon interfaces for GPU voltage:
 *
 * - in0_input: the voltage on the GPU in millivolts
 *
 * - in1_input: the voltage on the Northbridge in millivolts (APUs only)
 *
 * hwmon interfaces for GPU power:
 *
 * - power1_average: average power used by the GPU in microWatts
 *
 * - power1_cap_min: minimum cap supported in microWatts
 *
 * - power1_cap_max: maximum cap supported in microWatts
 *
 * - power1_cap: selected power cap in microWatts
 *
 * - power2_*: same set of files for the fast PPT limit on ASICs that expose
 *   one (currently Vangogh); power[1-2]_label reports slowPPT/fastPPT
 *
 * hwmon interfaces for GPU fan:
 *
 * - pwm1: pulse width modulation fan level (0-255)
 *
 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed
 *   control, 1: manual fan speed control using pwm interface, 2: automatic
 *   fan speed control)
 *
 * - pwm1_min: pulse width modulation fan control minimum level (0)
 *
 * - pwm1_max: pulse width modulation fan control maximum level (255)
 *
 * - fan1_min: minimum fan speed in RPM
 *
 * - fan1_max: maximum fan speed in RPM
 *
 * - fan1_input: current fan speed in RPM
 *
 * - fan1_target: desired fan speed in RPM
 *
 * - fan1_enable: enable or disable the sensor (1: enable, 0: disable)
 *
 * hwmon interfaces for GPU clocks:
 *
 * - freq1_input: the gfx/compute clock in hertz
 *
 * - freq2_input: the memory clock in hertz
 *
 * You can use hwmon tools like sensors to view this information on your system.
 */
3191static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
3192static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
3193static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
3194static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
3195static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
3196static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
3197static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
3198static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
3199static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
3200static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
3201static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
3202static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
3203static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
3204static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
3205static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
3206static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
3207static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
3208static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
3209static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
3210static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
3211static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
3212static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
3213static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
3214static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
3215static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
3216static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
3217static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
3218static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
3219static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
3220static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
3221static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
3222static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
3223static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0);
3224static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
3225static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1);
3226static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
3227static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
3228static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
3229static SENSOR_DEVICE_ATTR(power2_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 1);
3230static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);
3231static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
3232static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
3233static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
3234static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
3235
3236static struct attribute *hwmon_attributes[] = {
3237 &sensor_dev_attr_temp1_input.dev_attr.attr,
3238 &sensor_dev_attr_temp1_crit.dev_attr.attr,
3239 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
3240 &sensor_dev_attr_temp2_input.dev_attr.attr,
3241 &sensor_dev_attr_temp2_crit.dev_attr.attr,
3242 &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
3243 &sensor_dev_attr_temp3_input.dev_attr.attr,
3244 &sensor_dev_attr_temp3_crit.dev_attr.attr,
3245 &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
3246 &sensor_dev_attr_temp1_emergency.dev_attr.attr,
3247 &sensor_dev_attr_temp2_emergency.dev_attr.attr,
3248 &sensor_dev_attr_temp3_emergency.dev_attr.attr,
3249 &sensor_dev_attr_temp1_label.dev_attr.attr,
3250 &sensor_dev_attr_temp2_label.dev_attr.attr,
3251 &sensor_dev_attr_temp3_label.dev_attr.attr,
3252 &sensor_dev_attr_pwm1.dev_attr.attr,
3253 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
3254 &sensor_dev_attr_pwm1_min.dev_attr.attr,
3255 &sensor_dev_attr_pwm1_max.dev_attr.attr,
3256 &sensor_dev_attr_fan1_input.dev_attr.attr,
3257 &sensor_dev_attr_fan1_min.dev_attr.attr,
3258 &sensor_dev_attr_fan1_max.dev_attr.attr,
3259 &sensor_dev_attr_fan1_target.dev_attr.attr,
3260 &sensor_dev_attr_fan1_enable.dev_attr.attr,
3261 &sensor_dev_attr_in0_input.dev_attr.attr,
3262 &sensor_dev_attr_in0_label.dev_attr.attr,
3263 &sensor_dev_attr_in1_input.dev_attr.attr,
3264 &sensor_dev_attr_in1_label.dev_attr.attr,
3265 &sensor_dev_attr_power1_average.dev_attr.attr,
3266 &sensor_dev_attr_power1_cap_max.dev_attr.attr,
3267 &sensor_dev_attr_power1_cap_min.dev_attr.attr,
3268 &sensor_dev_attr_power1_cap.dev_attr.attr,
3269 &sensor_dev_attr_power1_cap_default.dev_attr.attr,
3270 &sensor_dev_attr_power1_label.dev_attr.attr,
3271 &sensor_dev_attr_power2_average.dev_attr.attr,
3272 &sensor_dev_attr_power2_cap_max.dev_attr.attr,
3273 &sensor_dev_attr_power2_cap_min.dev_attr.attr,
3274 &sensor_dev_attr_power2_cap.dev_attr.attr,
3275 &sensor_dev_attr_power2_cap_default.dev_attr.attr,
3276 &sensor_dev_attr_power2_label.dev_attr.attr,
3277 &sensor_dev_attr_freq1_input.dev_attr.attr,
3278 &sensor_dev_attr_freq1_label.dev_attr.attr,
3279 &sensor_dev_attr_freq2_input.dev_attr.attr,
3280 &sensor_dev_attr_freq2_label.dev_attr.attr,
3281 NULL
3282};
3283
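/*
 * Decide which hwmon attributes are visible for this device: entries are
 * hidden or made read-only depending on the ASIC, the SR-IOV mode and the
 * callbacks provided by the powerplay backend.
 */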
3284static umode_t hwmon_attributes_visible(struct kobject *kobj,
3285 struct attribute *attr, int index)
3286{
3287 struct device *dev = kobj_to_dev(kobj);
3288 struct amdgpu_device *adev = dev_get_drvdata(dev);
3289 umode_t effective_mode = attr->mode;
3290
	/* under multi-vf mode, the hwmon attributes are all not supported */
3292 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
3293 return 0;
3294
	/* under pp one vf mode manage of hwmon attributes is not supported */
3296 if (amdgpu_sriov_is_pp_one_vf(adev) &&
3297 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3298 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3299 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3300 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3301 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3302 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3303 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3304 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3305 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3306 return 0;
3307
	/* Skip fan attributes if fan is not present */
3309 if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3310 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3311 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3312 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3313 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3314 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3315 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3316 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3317 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3318 return 0;
3319
	/* Skip fan attributes on APU */
3321 if ((adev->flags & AMD_IS_APU) &&
3322 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3323 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3324 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3325 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3326 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3327 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3328 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3329 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3330 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3331 return 0;
3332
	/* Skip crit temp on APU */
3334 if ((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ) &&
3335 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3336 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
3337 return 0;
3338
	/* Skip limit attributes if DPM is not enabled */
3340 if (!adev->pm.dpm_enabled &&
3341 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3342 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
3343 attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3344 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3345 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3346 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3347 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3348 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3349 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3350 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3351 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3352 return 0;
3353
3354 if (!is_support_sw_smu(adev)) {
		/* mask fan attributes if we have no bindings for this asic to expose */
3356 if ((!adev->powerplay.pp_funcs->get_fan_speed_pwm &&
3357 attr == &sensor_dev_attr_pwm1.dev_attr.attr) ||
3358 (!adev->powerplay.pp_funcs->get_fan_control_mode &&
3359 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr))
3360 effective_mode &= ~S_IRUGO;
3361
3362 if ((!adev->powerplay.pp_funcs->set_fan_speed_pwm &&
3363 attr == &sensor_dev_attr_pwm1.dev_attr.attr) ||
3364 (!adev->powerplay.pp_funcs->set_fan_control_mode &&
3365 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr))
3366 effective_mode &= ~S_IWUSR;
3367 }
3368
3369 if (((adev->family == AMDGPU_FAMILY_SI) ||
3370 ((adev->flags & AMD_IS_APU) &&
3371 (adev->asic_type != CHIP_VANGOGH))) &&
3372 (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
3374 attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
3375 attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr))
3376 return 0;
3377
3378 if (((adev->family == AMDGPU_FAMILY_SI) ||
3379 ((adev->flags & AMD_IS_APU) &&
3380 (adev->asic_type < CHIP_RENOIR))) &&
3381 (attr == &sensor_dev_attr_power1_average.dev_attr.attr))
3382 return 0;
3383
3384 if (!is_support_sw_smu(adev)) {
		/* hide max/min values if we can't both query and manage the fan */
3386 if ((!adev->powerplay.pp_funcs->set_fan_speed_pwm &&
3387 !adev->powerplay.pp_funcs->get_fan_speed_pwm) &&
3388 (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
3389 !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
3390 (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3391 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
3392 return 0;
3393
3394 if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
3395 !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
3396 (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3397 attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
3398 return 0;
3399 }
3400
3401 if ((adev->family == AMDGPU_FAMILY_SI ||
3402 adev->family == AMDGPU_FAMILY_KV) &&
3403 (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
3404 attr == &sensor_dev_attr_in0_label.dev_attr.attr))
3405 return 0;
3406
	/* only APUs have vddnb */
3408 if (!(adev->flags & AMD_IS_APU) &&
3409 (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
3410 attr == &sensor_dev_attr_in1_label.dev_attr.attr))
3411 return 0;
3412
	/* no mclk on APUs */
3414 if ((adev->flags & AMD_IS_APU) &&
3415 (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
3416 attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
3417 return 0;
3418
	/* only SOC15 dGPUs support hotspot and mem temperatures */
3420 if (((adev->flags & AMD_IS_APU) ||
3421 adev->asic_type < CHIP_VEGA10) &&
3422 (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
3423 attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
3424 attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
3425 attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
3426 attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
3427 attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
3428 attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr ||
3429 attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
3430 attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
3431 attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
3432 attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
3433 return 0;
3434
	/* only Vangogh has fast PPT limit and power labels */
3436 if (!(adev->asic_type == CHIP_VANGOGH) &&
3437 (attr == &sensor_dev_attr_power2_average.dev_attr.attr ||
3438 attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
3439 attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
3440 attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
3441 attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
3442 attr == &sensor_dev_attr_power2_label.dev_attr.attr ||
3443 attr == &sensor_dev_attr_power1_label.dev_attr.attr))
3444 return 0;
3445
3446 return effective_mode;
3447}
3448
3449static const struct attribute_group hwmon_attrgroup = {
3450 .attrs = hwmon_attributes,
3451 .is_visible = hwmon_attributes_visible,
3452};
3453
3454static const struct attribute_group *hwmon_groups[] = {
3455 &hwmon_attrgroup,
3456 NULL
3457};
3458
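/*
 * Register the hwmon device and create the power management sysfs attributes
 * that match the current bare-metal or SR-IOV configuration.
 */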
3459int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
3460{
3461 int ret;
3462 uint32_t mask = 0;
3463
3464 if (adev->pm.sysfs_initialized)
3465 return 0;
3466
3467 if (adev->pm.dpm_enabled == 0)
3468 return 0;
3469
3470 INIT_LIST_HEAD(&adev->pm.pm_attr_list);
3471
3472 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
3473 DRIVER_NAME, adev,
3474 hwmon_groups);
3475 if (IS_ERR(adev->pm.int_hwmon_dev)) {
3476 ret = PTR_ERR(adev->pm.int_hwmon_dev);
3477 dev_err(adev->dev,
3478 "Unable to register hwmon device: %d\n", ret);
3479 return ret;
3480 }
3481
3482 switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
3483 case SRIOV_VF_MODE_ONE_VF:
3484 mask = ATTR_FLAG_ONEVF;
3485 break;
3486 case SRIOV_VF_MODE_MULTI_VF:
3487 mask = 0;
3488 break;
3489 case SRIOV_VF_MODE_BARE_METAL:
3490 default:
3491 mask = ATTR_FLAG_MASK_ALL;
3492 break;
3493 }
3494
3495 ret = amdgpu_device_attr_create_groups(adev,
3496 amdgpu_device_attrs,
3497 ARRAY_SIZE(amdgpu_device_attrs),
3498 mask,
3499 &adev->pm.pm_attr_list);
3500 if (ret)
3501 return ret;
3502
3503 adev->pm.sysfs_initialized = true;
3504
3505 return 0;
3506}
3507
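/* Tear down the hwmon device and remove the sysfs attribute groups created at init time. */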
3508void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
3509{
3510 if (adev->pm.dpm_enabled == 0)
3511 return;
3512
3513 if (adev->pm.int_hwmon_dev)
3514 hwmon_device_unregister(adev->pm.int_hwmon_dev);
3515
3516 amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
3517}
3518
/*
 * Debugfs info
 */
3522#if defined(CONFIG_DEBUG_FS)
3523
static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
					   struct amdgpu_device *adev)
{
	uint16_t *p_val;
	uint32_t size;
	int i;

	if (is_support_cclk_dpm(adev)) {
		p_val = kcalloc(adev->smu.cpu_core_num, sizeof(uint16_t),
				GFP_KERNEL);
		if (!p_val)
			return;

		/* one 16-bit clock value per CPU core */
		size = adev->smu.cpu_core_num * sizeof(uint16_t);
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
					    (void *)p_val, &size)) {
			for (i = 0; i < adev->smu.cpu_core_num; i++)
				seq_printf(m, "\t%u MHz (CPU%d)\n",
					   *(p_val + i), i);
		}

		kfree(p_val);
	}
}
3544
3545static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
3546{
3547 uint32_t value;
3548 uint64_t value64 = 0;
3549 uint32_t query = 0;
3550 int size;
3551
3552
3553 size = sizeof(value);
3554 seq_printf(m, "GFX Clocks and Power:\n");
3555
3556 amdgpu_debugfs_prints_cpu_info(m, adev);
3557
3558 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
3559 seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
3560 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
3561 seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
3562 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
3563 seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
3564 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
3565 seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
3566 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
3567 seq_printf(m, "\t%u mV (VDDGFX)\n", value);
3568 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
3569 seq_printf(m, "\t%u mV (VDDNB)\n", value);
3570 size = sizeof(uint32_t);
3571 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
3572 seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
3573 size = sizeof(value);
3574 seq_printf(m, "\n");
3575
	/* GPU Temperature */
3577 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
3578 seq_printf(m, "GPU Temperature: %u C\n", value/1000);
3579
	/* GPU Load */
3581 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
3582 seq_printf(m, "GPU Load: %u %%\n", value);
	/* MEM Load */
3584 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
3585 seq_printf(m, "MEM Load: %u %%\n", value);
3586
3587 seq_printf(m, "\n");
3588
	/* SMC feature mask */
3590 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
3591 seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
3592
3593 if (adev->asic_type > CHIP_VEGA20) {
		/* VCN clocks */
3595 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
3596 if (!value) {
3597 seq_printf(m, "VCN: Disabled\n");
3598 } else {
3599 seq_printf(m, "VCN: Enabled\n");
3600 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3601 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3602 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3603 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3604 }
3605 }
3606 seq_printf(m, "\n");
3607 } else {
		/* UVD clocks */
3609 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
3610 if (!value) {
3611 seq_printf(m, "UVD: Disabled\n");
3612 } else {
3613 seq_printf(m, "UVD: Enabled\n");
3614 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3615 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3616 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3617 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3618 }
3619 }
3620 seq_printf(m, "\n");
3621
		/* VCE clocks */
3623 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
3624 if (!value) {
3625 seq_printf(m, "VCE: Disabled\n");
3626 } else {
3627 seq_printf(m, "VCE: Enabled\n");
3628 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
3629 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
3630 }
3631 }
3632 }
3633
3634 return 0;
3635}
3636
3637static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
3638{
3639 int i;
3640
3641 for (i = 0; clocks[i].flag; i++)
3642 seq_printf(m, "\t%s: %s\n", clocks[i].name,
3643 (flags & clocks[i].flag) ? "On" : "Off");
3644}
3645
3646static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
3647{
3648 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
3649 struct drm_device *dev = adev_to_drm(adev);
3650 u32 flags = 0;
3651 int r;
3652
3653 if (amdgpu_in_reset(adev))
3654 return -EPERM;
3655 if (adev->in_suspend && !adev->in_runpm)
3656 return -EPERM;
3657
3658 r = pm_runtime_get_sync(dev->dev);
3659 if (r < 0) {
3660 pm_runtime_put_autosuspend(dev->dev);
3661 return r;
3662 }
3663
3664 if (!adev->pm.dpm_enabled) {
3665 seq_printf(m, "dpm not enabled\n");
3666 pm_runtime_mark_last_busy(dev->dev);
3667 pm_runtime_put_autosuspend(dev->dev);
3668 return 0;
3669 }
3670
	if (!is_support_sw_smu(adev) &&
	    adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
		mutex_lock(&adev->pm.mutex);
		adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
		mutex_unlock(&adev->pm.mutex);
		r = 0;
	} else {
		r = amdgpu_debugfs_pm_info_pp(m, adev);
	}
3683 if (r)
3684 goto out;
3685
3686 amdgpu_device_ip_get_clockgating_state(adev, &flags);
3687
3688 seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
3689 amdgpu_parse_cg_state(m, flags);
3690 seq_printf(m, "\n");
3691
3692out:
3693 pm_runtime_mark_last_busy(dev->dev);
3694 pm_runtime_put_autosuspend(dev->dev);
3695
3696 return r;
3697}
3698
3699DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info);
3700
3701
3702
3703
3704
3705
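/*
 * Read handler for the amdgpu_pm_prv_buffer debugfs file: dumps the SMU
 * private buffer if the powerplay backend exposes one.
 */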
3706static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
3707 size_t size, loff_t *pos)
3708{
3709 struct amdgpu_device *adev = file_inode(f)->i_private;
3710 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
3711 void *pp_handle = adev->powerplay.pp_handle;
3712 size_t smu_prv_buf_size;
3713 void *smu_prv_buf;
3714
3715 if (amdgpu_in_reset(adev))
3716 return -EPERM;
3717 if (adev->in_suspend && !adev->in_runpm)
3718 return -EPERM;
3719
3720 if (pp_funcs && pp_funcs->get_smu_prv_buf_details)
3721 pp_funcs->get_smu_prv_buf_details(pp_handle, &smu_prv_buf,
3722 &smu_prv_buf_size);
3723 else
3724 return -ENOSYS;
3725
3726 if (!smu_prv_buf || !smu_prv_buf_size)
3727 return -EINVAL;
3728
3729 return simple_read_from_buffer(buf, size, pos, smu_prv_buf,
3730 smu_prv_buf_size);
3731}
3732
3733static const struct file_operations amdgpu_debugfs_pm_prv_buffer_fops = {
3734 .owner = THIS_MODULE,
3735 .open = simple_open,
3736 .read = amdgpu_pm_prv_buffer_read,
3737 .llseek = default_llseek,
3738};
3739
3740#endif
3741
3742void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
3743{
3744#if defined(CONFIG_DEBUG_FS)
3745 struct drm_minor *minor = adev_to_drm(adev)->primary;
3746 struct dentry *root = minor->debugfs_root;
3747
3748 debugfs_create_file("amdgpu_pm_info", 0444, root, adev,
3749 &amdgpu_debugfs_pm_info_fops);
3750
3751 if (adev->pm.smu_prv_buffer_size > 0)
3752 debugfs_create_file_size("amdgpu_pm_prv_buffer", 0444, root,
3753 adev,
3754 &amdgpu_debugfs_pm_prv_buffer_fops,
3755 adev->pm.smu_prv_buffer_size);
3756#endif
3757}
3758