1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <drm/drmP.h>
26#include "amdgpu.h"
27#include "amdgpu_drv.h"
28#include "amdgpu_pm.h"
29#include "amdgpu_dpm.h"
30#include "amdgpu_display.h"
31#include "atom.h"
32#include <linux/power_supply.h>
33#include <linux/hwmon.h>
34#include <linux/hwmon-sysfs.h>
35#include <linux/nospec.h>
36#include "hwmgr.h"
37#define WIDTH_4K 3840
38
39static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
40
/*
 * Human-readable names for the AMD_CG_SUPPORT_* clock/power-gating
 * feature flags.  The table is zero-terminated (final {0, NULL} entry)
 * so callers can walk it without knowing its length.
 */
static const struct cg_flag_name clocks[] = {
	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
	{AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
	{0, NULL},
};
68
69void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
70{
71 if (adev->pm.dpm_enabled) {
72 mutex_lock(&adev->pm.mutex);
73 if (power_supply_is_system_supplied() > 0)
74 adev->pm.ac_power = true;
75 else
76 adev->pm.ac_power = false;
77 if (adev->powerplay.pp_funcs->enable_bapm)
78 amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
79 mutex_unlock(&adev->pm.mutex);
80 }
81}
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117static ssize_t amdgpu_get_dpm_state(struct device *dev,
118 struct device_attribute *attr,
119 char *buf)
120{
121 struct drm_device *ddev = dev_get_drvdata(dev);
122 struct amdgpu_device *adev = ddev->dev_private;
123 enum amd_pm_state_type pm;
124
125 if (adev->powerplay.pp_funcs->get_current_power_state)
126 pm = amdgpu_dpm_get_current_power_state(adev);
127 else
128 pm = adev->pm.dpm.user_state;
129
130 return snprintf(buf, PAGE_SIZE, "%s\n",
131 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
132 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
133}
134
135static ssize_t amdgpu_set_dpm_state(struct device *dev,
136 struct device_attribute *attr,
137 const char *buf,
138 size_t count)
139{
140 struct drm_device *ddev = dev_get_drvdata(dev);
141 struct amdgpu_device *adev = ddev->dev_private;
142 enum amd_pm_state_type state;
143
144 if (strncmp("battery", buf, strlen("battery")) == 0)
145 state = POWER_STATE_TYPE_BATTERY;
146 else if (strncmp("balanced", buf, strlen("balanced")) == 0)
147 state = POWER_STATE_TYPE_BALANCED;
148 else if (strncmp("performance", buf, strlen("performance")) == 0)
149 state = POWER_STATE_TYPE_PERFORMANCE;
150 else {
151 count = -EINVAL;
152 goto fail;
153 }
154
155 if (adev->powerplay.pp_funcs->dispatch_tasks) {
156 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
157 } else {
158 mutex_lock(&adev->pm.mutex);
159 adev->pm.dpm.user_state = state;
160 mutex_unlock(&adev->pm.mutex);
161
162
163 if (!(adev->flags & AMD_IS_PX) ||
164 (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
165 amdgpu_pm_compute_clocks(adev);
166 }
167fail:
168 return count;
169}
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
232 struct device_attribute *attr,
233 char *buf)
234{
235 struct drm_device *ddev = dev_get_drvdata(dev);
236 struct amdgpu_device *adev = ddev->dev_private;
237 enum amd_dpm_forced_level level = 0xff;
238
239 if ((adev->flags & AMD_IS_PX) &&
240 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
241 return snprintf(buf, PAGE_SIZE, "off\n");
242
243 if (adev->powerplay.pp_funcs->get_performance_level)
244 level = amdgpu_dpm_get_performance_level(adev);
245 else
246 level = adev->pm.dpm.forced_level;
247
248 return snprintf(buf, PAGE_SIZE, "%s\n",
249 (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
250 (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
251 (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
252 (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
253 (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
254 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
255 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
256 (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
257 "unknown");
258}
259
260static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
261 struct device_attribute *attr,
262 const char *buf,
263 size_t count)
264{
265 struct drm_device *ddev = dev_get_drvdata(dev);
266 struct amdgpu_device *adev = ddev->dev_private;
267 enum amd_dpm_forced_level level;
268 enum amd_dpm_forced_level current_level = 0xff;
269 int ret = 0;
270
271
272 if ((adev->flags & AMD_IS_PX) &&
273 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
274 return -EINVAL;
275
276 if (adev->powerplay.pp_funcs->get_performance_level)
277 current_level = amdgpu_dpm_get_performance_level(adev);
278
279 if (strncmp("low", buf, strlen("low")) == 0) {
280 level = AMD_DPM_FORCED_LEVEL_LOW;
281 } else if (strncmp("high", buf, strlen("high")) == 0) {
282 level = AMD_DPM_FORCED_LEVEL_HIGH;
283 } else if (strncmp("auto", buf, strlen("auto")) == 0) {
284 level = AMD_DPM_FORCED_LEVEL_AUTO;
285 } else if (strncmp("manual", buf, strlen("manual")) == 0) {
286 level = AMD_DPM_FORCED_LEVEL_MANUAL;
287 } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
288 level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
289 } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
290 level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
291 } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
292 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
293 } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
294 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
295 } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
296 level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
297 } else {
298 count = -EINVAL;
299 goto fail;
300 }
301
302 if (current_level == level)
303 return count;
304
305 if (adev->powerplay.pp_funcs->force_performance_level) {
306 mutex_lock(&adev->pm.mutex);
307 if (adev->pm.dpm.thermal_active) {
308 count = -EINVAL;
309 mutex_unlock(&adev->pm.mutex);
310 goto fail;
311 }
312 ret = amdgpu_dpm_force_performance_level(adev, level);
313 if (ret)
314 count = -EINVAL;
315 else
316 adev->pm.dpm.forced_level = level;
317 mutex_unlock(&adev->pm.mutex);
318 }
319
320fail:
321 return count;
322}
323
324static ssize_t amdgpu_get_pp_num_states(struct device *dev,
325 struct device_attribute *attr,
326 char *buf)
327{
328 struct drm_device *ddev = dev_get_drvdata(dev);
329 struct amdgpu_device *adev = ddev->dev_private;
330 struct pp_states_info data;
331 int i, buf_len;
332
333 if (adev->powerplay.pp_funcs->get_pp_num_states)
334 amdgpu_dpm_get_pp_num_states(adev, &data);
335
336 buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
337 for (i = 0; i < data.nums; i++)
338 buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
339 (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
340 (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
341 (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
342 (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");
343
344 return buf_len;
345}
346
347static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
348 struct device_attribute *attr,
349 char *buf)
350{
351 struct drm_device *ddev = dev_get_drvdata(dev);
352 struct amdgpu_device *adev = ddev->dev_private;
353 struct pp_states_info data;
354 enum amd_pm_state_type pm = 0;
355 int i = 0;
356
357 if (adev->powerplay.pp_funcs->get_current_power_state
358 && adev->powerplay.pp_funcs->get_pp_num_states) {
359 pm = amdgpu_dpm_get_current_power_state(adev);
360 amdgpu_dpm_get_pp_num_states(adev, &data);
361
362 for (i = 0; i < data.nums; i++) {
363 if (pm == data.states[i])
364 break;
365 }
366
367 if (i == data.nums)
368 i = -EINVAL;
369 }
370
371 return snprintf(buf, PAGE_SIZE, "%d\n", i);
372}
373
374static ssize_t amdgpu_get_pp_force_state(struct device *dev,
375 struct device_attribute *attr,
376 char *buf)
377{
378 struct drm_device *ddev = dev_get_drvdata(dev);
379 struct amdgpu_device *adev = ddev->dev_private;
380
381 if (adev->pp_force_state_enabled)
382 return amdgpu_get_pp_cur_state(dev, attr, buf);
383 else
384 return snprintf(buf, PAGE_SIZE, "\n");
385}
386
387static ssize_t amdgpu_set_pp_force_state(struct device *dev,
388 struct device_attribute *attr,
389 const char *buf,
390 size_t count)
391{
392 struct drm_device *ddev = dev_get_drvdata(dev);
393 struct amdgpu_device *adev = ddev->dev_private;
394 enum amd_pm_state_type state = 0;
395 unsigned long idx;
396 int ret;
397
398 if (strlen(buf) == 1)
399 adev->pp_force_state_enabled = false;
400 else if (adev->powerplay.pp_funcs->dispatch_tasks &&
401 adev->powerplay.pp_funcs->get_pp_num_states) {
402 struct pp_states_info data;
403
404 ret = kstrtoul(buf, 0, &idx);
405 if (ret || idx >= ARRAY_SIZE(data.states)) {
406 count = -EINVAL;
407 goto fail;
408 }
409 idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
410
411 amdgpu_dpm_get_pp_num_states(adev, &data);
412 state = data.states[idx];
413
414 if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
415 state != POWER_STATE_TYPE_DEFAULT) {
416 amdgpu_dpm_dispatch_task(adev,
417 AMD_PP_TASK_ENABLE_USER_STATE, &state);
418 adev->pp_force_state_enabled = true;
419 }
420 }
421fail:
422 return count;
423}
424
425
426
427
428
429
430
431
432
433
434
435
436static ssize_t amdgpu_get_pp_table(struct device *dev,
437 struct device_attribute *attr,
438 char *buf)
439{
440 struct drm_device *ddev = dev_get_drvdata(dev);
441 struct amdgpu_device *adev = ddev->dev_private;
442 char *table = NULL;
443 int size;
444
445 if (adev->powerplay.pp_funcs->get_pp_table)
446 size = amdgpu_dpm_get_pp_table(adev, &table);
447 else
448 return 0;
449
450 if (size >= PAGE_SIZE)
451 size = PAGE_SIZE - 1;
452
453 memcpy(buf, table, size);
454
455 return size;
456}
457
458static ssize_t amdgpu_set_pp_table(struct device *dev,
459 struct device_attribute *attr,
460 const char *buf,
461 size_t count)
462{
463 struct drm_device *ddev = dev_get_drvdata(dev);
464 struct amdgpu_device *adev = ddev->dev_private;
465
466 if (adev->powerplay.pp_funcs->set_pp_table)
467 amdgpu_dpm_set_pp_table(adev, buf, count);
468
469 return count;
470}
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
539 struct device_attribute *attr,
540 const char *buf,
541 size_t count)
542{
543 struct drm_device *ddev = dev_get_drvdata(dev);
544 struct amdgpu_device *adev = ddev->dev_private;
545 int ret;
546 uint32_t parameter_size = 0;
547 long parameter[64];
548 char buf_cpy[128];
549 char *tmp_str;
550 char *sub_str;
551 const char delimiter[3] = {' ', '\n', '\0'};
552 uint32_t type;
553
554 if (count > 127)
555 return -EINVAL;
556
557 if (*buf == 's')
558 type = PP_OD_EDIT_SCLK_VDDC_TABLE;
559 else if (*buf == 'm')
560 type = PP_OD_EDIT_MCLK_VDDC_TABLE;
561 else if(*buf == 'r')
562 type = PP_OD_RESTORE_DEFAULT_TABLE;
563 else if (*buf == 'c')
564 type = PP_OD_COMMIT_DPM_TABLE;
565 else if (!strncmp(buf, "vc", 2))
566 type = PP_OD_EDIT_VDDC_CURVE;
567 else
568 return -EINVAL;
569
570 memcpy(buf_cpy, buf, count+1);
571
572 tmp_str = buf_cpy;
573
574 if (type == PP_OD_EDIT_VDDC_CURVE)
575 tmp_str++;
576 while (isspace(*++tmp_str));
577
578 while (tmp_str[0]) {
579 sub_str = strsep(&tmp_str, delimiter);
580 ret = kstrtol(sub_str, 0, ¶meter[parameter_size]);
581 if (ret)
582 return -EINVAL;
583 parameter_size++;
584
585 while (isspace(*tmp_str))
586 tmp_str++;
587 }
588
589 if (adev->powerplay.pp_funcs->odn_edit_dpm_table)
590 ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
591 parameter, parameter_size);
592
593 if (ret)
594 return -EINVAL;
595
596 if (type == PP_OD_COMMIT_DPM_TABLE) {
597 if (adev->powerplay.pp_funcs->dispatch_tasks) {
598 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
599 return count;
600 } else {
601 return -EINVAL;
602 }
603 }
604
605 return count;
606}
607
608static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
609 struct device_attribute *attr,
610 char *buf)
611{
612 struct drm_device *ddev = dev_get_drvdata(dev);
613 struct amdgpu_device *adev = ddev->dev_private;
614 uint32_t size = 0;
615
616 if (adev->powerplay.pp_funcs->print_clock_levels) {
617 size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
618 size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
619 size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
620 size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
621 return size;
622 } else {
623 return snprintf(buf, PAGE_SIZE, "\n");
624 }
625
626}
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644static ssize_t amdgpu_set_ppfeature_status(struct device *dev,
645 struct device_attribute *attr,
646 const char *buf,
647 size_t count)
648{
649 struct drm_device *ddev = dev_get_drvdata(dev);
650 struct amdgpu_device *adev = ddev->dev_private;
651 uint64_t featuremask;
652 int ret;
653
654 ret = kstrtou64(buf, 0, &featuremask);
655 if (ret)
656 return -EINVAL;
657
658 pr_debug("featuremask = 0x%llx\n", featuremask);
659
660 if (adev->powerplay.pp_funcs->set_ppfeature_status) {
661 ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
662 if (ret)
663 return -EINVAL;
664 }
665
666 return count;
667}
668
669static ssize_t amdgpu_get_ppfeature_status(struct device *dev,
670 struct device_attribute *attr,
671 char *buf)
672{
673 struct drm_device *ddev = dev_get_drvdata(dev);
674 struct amdgpu_device *adev = ddev->dev_private;
675
676 if (adev->powerplay.pp_funcs->get_ppfeature_status)
677 return amdgpu_dpm_get_ppfeature_status(adev, buf);
678
679 return snprintf(buf, PAGE_SIZE, "\n");
680}
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
708 struct device_attribute *attr,
709 char *buf)
710{
711 struct drm_device *ddev = dev_get_drvdata(dev);
712 struct amdgpu_device *adev = ddev->dev_private;
713
714 if (adev->powerplay.pp_funcs->print_clock_levels)
715 return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
716 else
717 return snprintf(buf, PAGE_SIZE, "\n");
718}
719
720
721
722
723
724#define AMDGPU_MASK_BUF_MAX (32 * 13)
725
726static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
727{
728 int ret;
729 long level;
730 char *sub_str = NULL;
731 char *tmp;
732 char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
733 const char delimiter[3] = {' ', '\n', '\0'};
734 size_t bytes;
735
736 *mask = 0;
737
738 bytes = min(count, sizeof(buf_cpy) - 1);
739 memcpy(buf_cpy, buf, bytes);
740 buf_cpy[bytes] = '\0';
741 tmp = buf_cpy;
742 while (tmp[0]) {
743 sub_str = strsep(&tmp, delimiter);
744 if (strlen(sub_str)) {
745 ret = kstrtol(sub_str, 0, &level);
746 if (ret)
747 return -EINVAL;
748 *mask |= 1 << level;
749 } else
750 break;
751 }
752
753 return 0;
754}
755
756static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
757 struct device_attribute *attr,
758 const char *buf,
759 size_t count)
760{
761 struct drm_device *ddev = dev_get_drvdata(dev);
762 struct amdgpu_device *adev = ddev->dev_private;
763 int ret;
764 uint32_t mask = 0;
765
766 ret = amdgpu_read_mask(buf, count, &mask);
767 if (ret)
768 return ret;
769
770 if (adev->powerplay.pp_funcs->force_clock_level)
771 ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
772
773 if (ret)
774 return -EINVAL;
775
776 return count;
777}
778
779static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
780 struct device_attribute *attr,
781 char *buf)
782{
783 struct drm_device *ddev = dev_get_drvdata(dev);
784 struct amdgpu_device *adev = ddev->dev_private;
785
786 if (adev->powerplay.pp_funcs->print_clock_levels)
787 return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
788 else
789 return snprintf(buf, PAGE_SIZE, "\n");
790}
791
792static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
793 struct device_attribute *attr,
794 const char *buf,
795 size_t count)
796{
797 struct drm_device *ddev = dev_get_drvdata(dev);
798 struct amdgpu_device *adev = ddev->dev_private;
799 int ret;
800 uint32_t mask = 0;
801
802 ret = amdgpu_read_mask(buf, count, &mask);
803 if (ret)
804 return ret;
805
806 if (adev->powerplay.pp_funcs->force_clock_level)
807 ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
808
809 if (ret)
810 return -EINVAL;
811
812 return count;
813}
814
815static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
816 struct device_attribute *attr,
817 char *buf)
818{
819 struct drm_device *ddev = dev_get_drvdata(dev);
820 struct amdgpu_device *adev = ddev->dev_private;
821
822 if (adev->powerplay.pp_funcs->print_clock_levels)
823 return amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
824 else
825 return snprintf(buf, PAGE_SIZE, "\n");
826}
827
828static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
829 struct device_attribute *attr,
830 const char *buf,
831 size_t count)
832{
833 struct drm_device *ddev = dev_get_drvdata(dev);
834 struct amdgpu_device *adev = ddev->dev_private;
835 int ret;
836 uint32_t mask = 0;
837
838 ret = amdgpu_read_mask(buf, count, &mask);
839 if (ret)
840 return ret;
841
842 if (adev->powerplay.pp_funcs->force_clock_level)
843 ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
844
845 if (ret)
846 return -EINVAL;
847
848 return count;
849}
850
851static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
852 struct device_attribute *attr,
853 char *buf)
854{
855 struct drm_device *ddev = dev_get_drvdata(dev);
856 struct amdgpu_device *adev = ddev->dev_private;
857
858 if (adev->powerplay.pp_funcs->print_clock_levels)
859 return amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
860 else
861 return snprintf(buf, PAGE_SIZE, "\n");
862}
863
864static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
865 struct device_attribute *attr,
866 const char *buf,
867 size_t count)
868{
869 struct drm_device *ddev = dev_get_drvdata(dev);
870 struct amdgpu_device *adev = ddev->dev_private;
871 int ret;
872 uint32_t mask = 0;
873
874 ret = amdgpu_read_mask(buf, count, &mask);
875 if (ret)
876 return ret;
877
878 if (adev->powerplay.pp_funcs->force_clock_level)
879 ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
880
881 if (ret)
882 return -EINVAL;
883
884 return count;
885}
886
887static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
888 struct device_attribute *attr,
889 char *buf)
890{
891 struct drm_device *ddev = dev_get_drvdata(dev);
892 struct amdgpu_device *adev = ddev->dev_private;
893
894 if (adev->powerplay.pp_funcs->print_clock_levels)
895 return amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
896 else
897 return snprintf(buf, PAGE_SIZE, "\n");
898}
899
900static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
901 struct device_attribute *attr,
902 const char *buf,
903 size_t count)
904{
905 struct drm_device *ddev = dev_get_drvdata(dev);
906 struct amdgpu_device *adev = ddev->dev_private;
907 int ret;
908 uint32_t mask = 0;
909
910 ret = amdgpu_read_mask(buf, count, &mask);
911 if (ret)
912 return ret;
913
914 if (adev->powerplay.pp_funcs->force_clock_level)
915 ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
916
917 if (ret)
918 return -EINVAL;
919
920 return count;
921}
922
923static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
924 struct device_attribute *attr,
925 char *buf)
926{
927 struct drm_device *ddev = dev_get_drvdata(dev);
928 struct amdgpu_device *adev = ddev->dev_private;
929
930 if (adev->powerplay.pp_funcs->print_clock_levels)
931 return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
932 else
933 return snprintf(buf, PAGE_SIZE, "\n");
934}
935
936static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
937 struct device_attribute *attr,
938 const char *buf,
939 size_t count)
940{
941 struct drm_device *ddev = dev_get_drvdata(dev);
942 struct amdgpu_device *adev = ddev->dev_private;
943 int ret;
944 uint32_t mask = 0;
945
946 ret = amdgpu_read_mask(buf, count, &mask);
947 if (ret)
948 return ret;
949
950 if (adev->powerplay.pp_funcs->force_clock_level)
951 ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
952
953 if (ret)
954 return -EINVAL;
955
956 return count;
957}
958
959static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
960 struct device_attribute *attr,
961 char *buf)
962{
963 struct drm_device *ddev = dev_get_drvdata(dev);
964 struct amdgpu_device *adev = ddev->dev_private;
965 uint32_t value = 0;
966
967 if (adev->powerplay.pp_funcs->get_sclk_od)
968 value = amdgpu_dpm_get_sclk_od(adev);
969
970 return snprintf(buf, PAGE_SIZE, "%d\n", value);
971}
972
973static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
974 struct device_attribute *attr,
975 const char *buf,
976 size_t count)
977{
978 struct drm_device *ddev = dev_get_drvdata(dev);
979 struct amdgpu_device *adev = ddev->dev_private;
980 int ret;
981 long int value;
982
983 ret = kstrtol(buf, 0, &value);
984
985 if (ret) {
986 count = -EINVAL;
987 goto fail;
988 }
989 if (adev->powerplay.pp_funcs->set_sclk_od)
990 amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
991
992 if (adev->powerplay.pp_funcs->dispatch_tasks) {
993 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
994 } else {
995 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
996 amdgpu_pm_compute_clocks(adev);
997 }
998
999fail:
1000 return count;
1001}
1002
1003static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
1004 struct device_attribute *attr,
1005 char *buf)
1006{
1007 struct drm_device *ddev = dev_get_drvdata(dev);
1008 struct amdgpu_device *adev = ddev->dev_private;
1009 uint32_t value = 0;
1010
1011 if (adev->powerplay.pp_funcs->get_mclk_od)
1012 value = amdgpu_dpm_get_mclk_od(adev);
1013
1014 return snprintf(buf, PAGE_SIZE, "%d\n", value);
1015}
1016
1017static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
1018 struct device_attribute *attr,
1019 const char *buf,
1020 size_t count)
1021{
1022 struct drm_device *ddev = dev_get_drvdata(dev);
1023 struct amdgpu_device *adev = ddev->dev_private;
1024 int ret;
1025 long int value;
1026
1027 ret = kstrtol(buf, 0, &value);
1028
1029 if (ret) {
1030 count = -EINVAL;
1031 goto fail;
1032 }
1033 if (adev->powerplay.pp_funcs->set_mclk_od)
1034 amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
1035
1036 if (adev->powerplay.pp_funcs->dispatch_tasks) {
1037 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
1038 } else {
1039 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1040 amdgpu_pm_compute_clocks(adev);
1041 }
1042
1043fail:
1044 return count;
1045}
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
1068 struct device_attribute *attr,
1069 char *buf)
1070{
1071 struct drm_device *ddev = dev_get_drvdata(dev);
1072 struct amdgpu_device *adev = ddev->dev_private;
1073
1074 if (adev->powerplay.pp_funcs->get_power_profile_mode)
1075 return amdgpu_dpm_get_power_profile_mode(adev, buf);
1076
1077 return snprintf(buf, PAGE_SIZE, "\n");
1078}
1079
1080
1081static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
1082 struct device_attribute *attr,
1083 const char *buf,
1084 size_t count)
1085{
1086 int ret = 0xff;
1087 struct drm_device *ddev = dev_get_drvdata(dev);
1088 struct amdgpu_device *adev = ddev->dev_private;
1089 uint32_t parameter_size = 0;
1090 long parameter[64];
1091 char *sub_str, buf_cpy[128];
1092 char *tmp_str;
1093 uint32_t i = 0;
1094 char tmp[2];
1095 long int profile_mode = 0;
1096 const char delimiter[3] = {' ', '\n', '\0'};
1097
1098 tmp[0] = *(buf);
1099 tmp[1] = '\0';
1100 ret = kstrtol(tmp, 0, &profile_mode);
1101 if (ret)
1102 goto fail;
1103
1104 if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
1105 if (count < 2 || count > 127)
1106 return -EINVAL;
1107 while (isspace(*++buf))
1108 i++;
1109 memcpy(buf_cpy, buf, count-i);
1110 tmp_str = buf_cpy;
1111 while (tmp_str[0]) {
1112 sub_str = strsep(&tmp_str, delimiter);
1113 ret = kstrtol(sub_str, 0, ¶meter[parameter_size]);
1114 if (ret) {
1115 count = -EINVAL;
1116 goto fail;
1117 }
1118 parameter_size++;
1119 while (isspace(*tmp_str))
1120 tmp_str++;
1121 }
1122 }
1123 parameter[parameter_size] = profile_mode;
1124 if (adev->powerplay.pp_funcs->set_power_profile_mode)
1125 ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
1126
1127 if (!ret)
1128 return count;
1129fail:
1130 return -EINVAL;
1131}
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141static ssize_t amdgpu_get_busy_percent(struct device *dev,
1142 struct device_attribute *attr,
1143 char *buf)
1144{
1145 struct drm_device *ddev = dev_get_drvdata(dev);
1146 struct amdgpu_device *adev = ddev->dev_private;
1147 int r, value, size = sizeof(value);
1148
1149
1150 if (!(adev->powerplay.pp_funcs &&
1151 adev->powerplay.pp_funcs->read_sensor))
1152 return -EINVAL;
1153
1154
1155 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
1156 (void *)&value, &size);
1157 if (r)
1158 return r;
1159
1160 return snprintf(buf, PAGE_SIZE, "%d\n", value);
1161}
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175static ssize_t amdgpu_get_pcie_bw(struct device *dev,
1176 struct device_attribute *attr,
1177 char *buf)
1178{
1179 struct drm_device *ddev = dev_get_drvdata(dev);
1180 struct amdgpu_device *adev = ddev->dev_private;
1181 uint64_t count0, count1;
1182
1183 amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
1184 return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n",
1185 count0, count1, pcie_get_mps(adev->pdev));
1186}
1187
/*
 * sysfs attributes exposed on the DRM device: dpm state and forced
 * performance level, power-state table control, per-clock dpm level
 * masks, overdrive knobs, power profile modes and basic telemetry.
 */
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		   amdgpu_get_dpm_forced_performance_level,
		   amdgpu_set_dpm_forced_performance_level);
static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL);
static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL);
static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR,
		   amdgpu_get_pp_force_state,
		   amdgpu_set_pp_force_state);
static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR,
		   amdgpu_get_pp_table,
		   amdgpu_set_pp_table);
static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
		   amdgpu_get_pp_dpm_sclk,
		   amdgpu_set_pp_dpm_sclk);
static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
		   amdgpu_get_pp_dpm_mclk,
		   amdgpu_set_pp_dpm_mclk);
static DEVICE_ATTR(pp_dpm_socclk, S_IRUGO | S_IWUSR,
		   amdgpu_get_pp_dpm_socclk,
		   amdgpu_set_pp_dpm_socclk);
static DEVICE_ATTR(pp_dpm_fclk, S_IRUGO | S_IWUSR,
		   amdgpu_get_pp_dpm_fclk,
		   amdgpu_set_pp_dpm_fclk);
static DEVICE_ATTR(pp_dpm_dcefclk, S_IRUGO | S_IWUSR,
		   amdgpu_get_pp_dpm_dcefclk,
		   amdgpu_set_pp_dpm_dcefclk);
static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
		   amdgpu_get_pp_dpm_pcie,
		   amdgpu_set_pp_dpm_pcie);
static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
		   amdgpu_get_pp_sclk_od,
		   amdgpu_set_pp_sclk_od);
static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
		   amdgpu_get_pp_mclk_od,
		   amdgpu_set_pp_mclk_od);
static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR,
		   amdgpu_get_pp_power_profile_mode,
		   amdgpu_set_pp_power_profile_mode);
static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR,
		   amdgpu_get_pp_od_clk_voltage,
		   amdgpu_set_pp_od_clk_voltage);
static DEVICE_ATTR(gpu_busy_percent, S_IRUGO,
		   amdgpu_get_busy_percent, NULL);
static DEVICE_ATTR(pcie_bw, S_IRUGO, amdgpu_get_pcie_bw, NULL);
static DEVICE_ATTR(ppfeatures, S_IRUGO | S_IWUSR,
		   amdgpu_get_ppfeature_status,
		   amdgpu_set_ppfeature_status);
1236
1237static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
1238 struct device_attribute *attr,
1239 char *buf)
1240{
1241 struct amdgpu_device *adev = dev_get_drvdata(dev);
1242 struct drm_device *ddev = adev->ddev;
1243 int r, temp, size = sizeof(temp);
1244
1245
1246 if ((adev->flags & AMD_IS_PX) &&
1247 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1248 return -EINVAL;
1249
1250
1251 if (!(adev->powerplay.pp_funcs &&
1252 adev->powerplay.pp_funcs->read_sensor))
1253 return -EINVAL;
1254
1255
1256 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
1257 (void *)&temp, &size);
1258 if (r)
1259 return r;
1260
1261 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
1262}
1263
1264static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
1265 struct device_attribute *attr,
1266 char *buf)
1267{
1268 struct amdgpu_device *adev = dev_get_drvdata(dev);
1269 int hyst = to_sensor_dev_attr(attr)->index;
1270 int temp;
1271
1272 if (hyst)
1273 temp = adev->pm.dpm.thermal.min_temp;
1274 else
1275 temp = adev->pm.dpm.thermal.max_temp;
1276
1277 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
1278}
1279
1280static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
1281 struct device_attribute *attr,
1282 char *buf)
1283{
1284 struct amdgpu_device *adev = dev_get_drvdata(dev);
1285 u32 pwm_mode = 0;
1286
1287 if (!adev->powerplay.pp_funcs->get_fan_control_mode)
1288 return -EINVAL;
1289
1290 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
1291
1292 return sprintf(buf, "%i\n", pwm_mode);
1293}
1294
1295static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
1296 struct device_attribute *attr,
1297 const char *buf,
1298 size_t count)
1299{
1300 struct amdgpu_device *adev = dev_get_drvdata(dev);
1301 int err;
1302 int value;
1303
1304
1305 if ((adev->flags & AMD_IS_PX) &&
1306 (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1307 return -EINVAL;
1308
1309 if (!adev->powerplay.pp_funcs->set_fan_control_mode)
1310 return -EINVAL;
1311
1312 err = kstrtoint(buf, 10, &value);
1313 if (err)
1314 return err;
1315
1316 amdgpu_dpm_set_fan_control_mode(adev, value);
1317
1318 return count;
1319}
1320
1321static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
1322 struct device_attribute *attr,
1323 char *buf)
1324{
1325 return sprintf(buf, "%i\n", 0);
1326}
1327
1328static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
1329 struct device_attribute *attr,
1330 char *buf)
1331{
1332 return sprintf(buf, "%i\n", 255);
1333}
1334
1335static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
1336 struct device_attribute *attr,
1337 const char *buf, size_t count)
1338{
1339 struct amdgpu_device *adev = dev_get_drvdata(dev);
1340 int err;
1341 u32 value;
1342 u32 pwm_mode;
1343
1344
1345 if ((adev->flags & AMD_IS_PX) &&
1346 (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1347 return -EINVAL;
1348
1349 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
1350 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
1351 pr_info("manual fan speed control should be enabled first\n");
1352 return -EINVAL;
1353 }
1354
1355 err = kstrtou32(buf, 10, &value);
1356 if (err)
1357 return err;
1358
1359 value = (value * 100) / 255;
1360
1361 if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
1362 err = amdgpu_dpm_set_fan_speed_percent(adev, value);
1363 if (err)
1364 return err;
1365 }
1366
1367 return count;
1368}
1369
1370static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
1371 struct device_attribute *attr,
1372 char *buf)
1373{
1374 struct amdgpu_device *adev = dev_get_drvdata(dev);
1375 int err;
1376 u32 speed = 0;
1377
1378
1379 if ((adev->flags & AMD_IS_PX) &&
1380 (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1381 return -EINVAL;
1382
1383 if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
1384 err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
1385 if (err)
1386 return err;
1387 }
1388
1389 speed = (speed * 255) / 100;
1390
1391 return sprintf(buf, "%i\n", speed);
1392}
1393
1394static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
1395 struct device_attribute *attr,
1396 char *buf)
1397{
1398 struct amdgpu_device *adev = dev_get_drvdata(dev);
1399 int err;
1400 u32 speed = 0;
1401
1402
1403 if ((adev->flags & AMD_IS_PX) &&
1404 (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1405 return -EINVAL;
1406
1407 if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
1408 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
1409 if (err)
1410 return err;
1411 }
1412
1413 return sprintf(buf, "%i\n", speed);
1414}
1415
1416static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
1417 struct device_attribute *attr,
1418 char *buf)
1419{
1420 struct amdgpu_device *adev = dev_get_drvdata(dev);
1421 u32 min_rpm = 0;
1422 u32 size = sizeof(min_rpm);
1423 int r;
1424
1425 if (!adev->powerplay.pp_funcs->read_sensor)
1426 return -EINVAL;
1427
1428 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
1429 (void *)&min_rpm, &size);
1430 if (r)
1431 return r;
1432
1433 return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm);
1434}
1435
1436static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
1437 struct device_attribute *attr,
1438 char *buf)
1439{
1440 struct amdgpu_device *adev = dev_get_drvdata(dev);
1441 u32 max_rpm = 0;
1442 u32 size = sizeof(max_rpm);
1443 int r;
1444
1445 if (!adev->powerplay.pp_funcs->read_sensor)
1446 return -EINVAL;
1447
1448 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
1449 (void *)&max_rpm, &size);
1450 if (r)
1451 return r;
1452
1453 return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm);
1454}
1455
1456static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
1457 struct device_attribute *attr,
1458 char *buf)
1459{
1460 struct amdgpu_device *adev = dev_get_drvdata(dev);
1461 int err;
1462 u32 rpm = 0;
1463
1464
1465 if ((adev->flags & AMD_IS_PX) &&
1466 (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1467 return -EINVAL;
1468
1469 if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
1470 err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
1471 if (err)
1472 return err;
1473 }
1474
1475 return sprintf(buf, "%i\n", rpm);
1476}
1477
1478static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
1479 struct device_attribute *attr,
1480 const char *buf, size_t count)
1481{
1482 struct amdgpu_device *adev = dev_get_drvdata(dev);
1483 int err;
1484 u32 value;
1485 u32 pwm_mode;
1486
1487 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
1488 if (pwm_mode != AMD_FAN_CTRL_MANUAL)
1489 return -ENODATA;
1490
1491
1492 if ((adev->flags & AMD_IS_PX) &&
1493 (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1494 return -EINVAL;
1495
1496 err = kstrtou32(buf, 10, &value);
1497 if (err)
1498 return err;
1499
1500 if (adev->powerplay.pp_funcs->set_fan_speed_rpm) {
1501 err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
1502 if (err)
1503 return err;
1504 }
1505
1506 return count;
1507}
1508
1509static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
1510 struct device_attribute *attr,
1511 char *buf)
1512{
1513 struct amdgpu_device *adev = dev_get_drvdata(dev);
1514 u32 pwm_mode = 0;
1515
1516 if (!adev->powerplay.pp_funcs->get_fan_control_mode)
1517 return -EINVAL;
1518
1519 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
1520
1521 return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
1522}
1523
1524static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
1525 struct device_attribute *attr,
1526 const char *buf,
1527 size_t count)
1528{
1529 struct amdgpu_device *adev = dev_get_drvdata(dev);
1530 int err;
1531 int value;
1532 u32 pwm_mode;
1533
1534
1535 if ((adev->flags & AMD_IS_PX) &&
1536 (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1537 return -EINVAL;
1538
1539 if (!adev->powerplay.pp_funcs->set_fan_control_mode)
1540 return -EINVAL;
1541
1542 err = kstrtoint(buf, 10, &value);
1543 if (err)
1544 return err;
1545
1546 if (value == 0)
1547 pwm_mode = AMD_FAN_CTRL_AUTO;
1548 else if (value == 1)
1549 pwm_mode = AMD_FAN_CTRL_MANUAL;
1550 else
1551 return -EINVAL;
1552
1553 amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
1554
1555 return count;
1556}
1557
1558static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
1559 struct device_attribute *attr,
1560 char *buf)
1561{
1562 struct amdgpu_device *adev = dev_get_drvdata(dev);
1563 struct drm_device *ddev = adev->ddev;
1564 u32 vddgfx;
1565 int r, size = sizeof(vddgfx);
1566
1567
1568 if ((adev->flags & AMD_IS_PX) &&
1569 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1570 return -EINVAL;
1571
1572
1573 if (!(adev->powerplay.pp_funcs &&
1574 adev->powerplay.pp_funcs->read_sensor))
1575 return -EINVAL;
1576
1577
1578 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
1579 (void *)&vddgfx, &size);
1580 if (r)
1581 return r;
1582
1583 return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx);
1584}
1585
1586static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
1587 struct device_attribute *attr,
1588 char *buf)
1589{
1590 return snprintf(buf, PAGE_SIZE, "vddgfx\n");
1591}
1592
1593static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
1594 struct device_attribute *attr,
1595 char *buf)
1596{
1597 struct amdgpu_device *adev = dev_get_drvdata(dev);
1598 struct drm_device *ddev = adev->ddev;
1599 u32 vddnb;
1600 int r, size = sizeof(vddnb);
1601
1602
1603 if (!(adev->flags & AMD_IS_APU))
1604 return -EINVAL;
1605
1606
1607 if ((adev->flags & AMD_IS_PX) &&
1608 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1609 return -EINVAL;
1610
1611
1612 if (!(adev->powerplay.pp_funcs &&
1613 adev->powerplay.pp_funcs->read_sensor))
1614 return -EINVAL;
1615
1616
1617 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
1618 (void *)&vddnb, &size);
1619 if (r)
1620 return r;
1621
1622 return snprintf(buf, PAGE_SIZE, "%d\n", vddnb);
1623}
1624
1625static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
1626 struct device_attribute *attr,
1627 char *buf)
1628{
1629 return snprintf(buf, PAGE_SIZE, "vddnb\n");
1630}
1631
1632static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
1633 struct device_attribute *attr,
1634 char *buf)
1635{
1636 struct amdgpu_device *adev = dev_get_drvdata(dev);
1637 struct drm_device *ddev = adev->ddev;
1638 u32 query = 0;
1639 int r, size = sizeof(u32);
1640 unsigned uw;
1641
1642
1643 if ((adev->flags & AMD_IS_PX) &&
1644 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1645 return -EINVAL;
1646
1647
1648 if (!(adev->powerplay.pp_funcs &&
1649 adev->powerplay.pp_funcs->read_sensor))
1650 return -EINVAL;
1651
1652
1653 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
1654 (void *)&query, &size);
1655 if (r)
1656 return r;
1657
1658
1659 uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
1660
1661 return snprintf(buf, PAGE_SIZE, "%u\n", uw);
1662}
1663
1664static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
1665 struct device_attribute *attr,
1666 char *buf)
1667{
1668 return sprintf(buf, "%i\n", 0);
1669}
1670
1671static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
1672 struct device_attribute *attr,
1673 char *buf)
1674{
1675 struct amdgpu_device *adev = dev_get_drvdata(dev);
1676 uint32_t limit = 0;
1677
1678 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
1679 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
1680 return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
1681 } else {
1682 return snprintf(buf, PAGE_SIZE, "\n");
1683 }
1684}
1685
1686static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
1687 struct device_attribute *attr,
1688 char *buf)
1689{
1690 struct amdgpu_device *adev = dev_get_drvdata(dev);
1691 uint32_t limit = 0;
1692
1693 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
1694 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
1695 return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
1696 } else {
1697 return snprintf(buf, PAGE_SIZE, "\n");
1698 }
1699}
1700
1701
1702static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
1703 struct device_attribute *attr,
1704 const char *buf,
1705 size_t count)
1706{
1707 struct amdgpu_device *adev = dev_get_drvdata(dev);
1708 int err;
1709 u32 value;
1710
1711 err = kstrtou32(buf, 10, &value);
1712 if (err)
1713 return err;
1714
1715 value = value / 1000000;
1716 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) {
1717 err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
1718 if (err)
1719 return err;
1720 } else {
1721 return -EINVAL;
1722 }
1723
1724 return count;
1725}
1726
1727static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
1728 struct device_attribute *attr,
1729 char *buf)
1730{
1731 struct amdgpu_device *adev = dev_get_drvdata(dev);
1732 struct drm_device *ddev = adev->ddev;
1733 uint32_t sclk;
1734 int r, size = sizeof(sclk);
1735
1736
1737 if ((adev->flags & AMD_IS_PX) &&
1738 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1739 return -EINVAL;
1740
1741
1742 if (!(adev->powerplay.pp_funcs &&
1743 adev->powerplay.pp_funcs->read_sensor))
1744 return -EINVAL;
1745
1746
1747 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
1748 (void *)&sclk, &size);
1749 if (r)
1750 return r;
1751
1752 return snprintf(buf, PAGE_SIZE, "%d\n", sclk * 10 * 1000);
1753}
1754
1755static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
1756 struct device_attribute *attr,
1757 char *buf)
1758{
1759 return snprintf(buf, PAGE_SIZE, "sclk\n");
1760}
1761
1762static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
1763 struct device_attribute *attr,
1764 char *buf)
1765{
1766 struct amdgpu_device *adev = dev_get_drvdata(dev);
1767 struct drm_device *ddev = adev->ddev;
1768 uint32_t mclk;
1769 int r, size = sizeof(mclk);
1770
1771
1772 if ((adev->flags & AMD_IS_PX) &&
1773 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1774 return -EINVAL;
1775
1776
1777 if (!(adev->powerplay.pp_funcs &&
1778 adev->powerplay.pp_funcs->read_sensor))
1779 return -EINVAL;
1780
1781
1782 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
1783 (void *)&mclk, &size);
1784 if (r)
1785 return r;
1786
1787 return snprintf(buf, PAGE_SIZE, "%d\n", mclk * 10 * 1000);
1788}
1789
1790static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
1791 struct device_attribute *attr,
1792 char *buf)
1793{
1794 return snprintf(buf, PAGE_SIZE, "mclk\n");
1795}
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
/*
 * hwmon sysfs attributes.  Variants sharing a show routine are told apart
 * via the trailing index argument (e.g. temp1_crit vs temp1_crit_hyst).
 */
/* GPU temperature and its thermal trip points. */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
/* Fan control in pwm units (0-255) plus mode selection. */
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
/* Fan speed in RPM. */
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
/* Voltage rails: in0 = GFX, in1 = northbridge (APU only). */
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
/* Power draw and power-cap controls. */
static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
/* Clocks: freq1 = sclk, freq2 = mclk (hidden on APUs). */
static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
1894
/*
 * All hwmon attributes this driver can expose; which ones are actually
 * visible on a given board is decided by hwmon_attributes_visible().
 */
static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	&sensor_dev_attr_fan1_input.dev_attr.attr,
	&sensor_dev_attr_fan1_min.dev_attr.attr,
	&sensor_dev_attr_fan1_max.dev_attr.attr,
	&sensor_dev_attr_fan1_target.dev_attr.attr,
	&sensor_dev_attr_fan1_enable.dev_attr.attr,
	&sensor_dev_attr_in0_input.dev_attr.attr,
	&sensor_dev_attr_in0_label.dev_attr.attr,
	&sensor_dev_attr_in1_input.dev_attr.attr,
	&sensor_dev_attr_in1_label.dev_attr.attr,
	&sensor_dev_attr_power1_average.dev_attr.attr,
	&sensor_dev_attr_power1_cap_max.dev_attr.attr,
	&sensor_dev_attr_power1_cap_min.dev_attr.attr,
	&sensor_dev_attr_power1_cap.dev_attr.attr,
	&sensor_dev_attr_freq1_input.dev_attr.attr,
	&sensor_dev_attr_freq1_label.dev_attr.attr,
	&sensor_dev_attr_freq2_input.dev_attr.attr,
	&sensor_dev_attr_freq2_label.dev_attr.attr,
	NULL
};
1922
/*
 * Decide per-attribute visibility/mode at hwmon registration time,
 * based on what the asic and the powerplay backend actually support.
 * Returning 0 hides the attribute entirely.
 */
static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	/* Skip all fan attributes if the board has no fan. */
	if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	/* Skip fan attributes on APUs. */
	if ((adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	/* Skip thermal limit and fan attributes if DPM is not enabled. */
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	/* Drop read permission when the backend can't query fan state. */
	if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) ||
	    (!adev->powerplay.pp_funcs->get_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr))
		effective_mode &= ~S_IRUGO;

	/* Drop write permission when the backend can't control the fan. */
	if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) ||
	    (!adev->powerplay.pp_funcs->set_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr))
		effective_mode &= ~S_IWUSR;

	/* APUs don't expose discrete power limits/averages here. */
	if ((adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
	     attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
		return 0;

	/* Hide pwm min/max when no percent or rpm fan interface exists. */
	if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
	     !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
	    (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
	     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* Hide fan rpm min/max when no rpm interface exists. */
	if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
	     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
	    (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
		return 0;

	/* Only APUs have a vddnb rail. */
	if (!(adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_in1_label.dev_attr.attr))
		return 0;

	/* No dedicated mclk attribute on APUs. */
	if ((adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
		return 0;

	return effective_mode;
}
2019
/* Group handed to the hwmon core; visibility resolved per attribute. */
static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

/* NULL-terminated group list for hwmon_device_register_with_groups(). */
static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};
2029
/**
 * amdgpu_dpm_thermal_work_handler - worker scheduled on a thermal event
 * @work: the embedded pm.dpm.thermal.work struct
 *
 * Re-reads the GPU temperature and selects either the internal thermal
 * power state or the user's state, then recomputes clocks.
 */
void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device,
			     pm.dpm.thermal.work);
	/* default: switch to the thermal state */
	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
	int temp, size = sizeof(temp);

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->powerplay.pp_funcs &&
	    adev->powerplay.pp_funcs->read_sensor &&
	    !amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
				    (void *)&temp, &size)) {
		if (temp < adev->pm.dpm.thermal.min_temp)
			/* cooled below the low trip point: restore user state */
			dpm_state = adev->pm.dpm.user_state;
	} else {
		if (adev->pm.dpm.thermal.high_to_low)
			/* no temp reading: trust the high-to-low transition flag */
			dpm_state = adev->pm.dpm.user_state;
	}
	mutex_lock(&adev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		adev->pm.dpm.thermal_active = true;
	else
		adev->pm.dpm.thermal_active = false;
	adev->pm.dpm.state = dpm_state;
	mutex_unlock(&adev->pm.mutex);

	amdgpu_pm_compute_clocks(adev);
}
2064
/*
 * Pick the best matching legacy power state for @dpm_state.  States marked
 * SINGLE_DISPLAY_ONLY are only eligible with fewer than two active crtcs
 * (and a long enough vblank).  If no state of the requested class exists,
 * fall back through progressively more generic classes; returns NULL only
 * when nothing matches at all.
 */
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* don't treat it as single-display if the vblank is too short to reclock */
	if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* some asics have a separate 3D performance state,
	 * so try that first when performance was requested
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states are mapped to performance */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* scan the state table for a match */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user-visible states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* nothing matched: pick a fallback class and search again */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}
2198
/*
 * Pick and program a new power state for the current conditions, then
 * re-apply any forced performance level.  NOTE(review): the _locked
 * suffix suggests the caller holds the relevant pm lock — confirm with
 * the callers outside this chunk.
 */
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;
	bool equal = false;

	/* nothing to do if dpm init failed or is disabled */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* only follow the user's wish when no override is active */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	/* with amdgpu_dpm == 1, dump the transition for debugging */
	if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;
	if (adev->powerplay.pp_funcs->display_configuration_changed)
		amdgpu_dpm_display_configuration_changed(adev);

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		return;

	if (adev->powerplay.pp_funcs->check_state_equal) {
		if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
			equal = false;
	}

	/* skip the switch if the new state is equivalent */
	if (equal)
		return;

	amdgpu_dpm_set_power_state(adev);
	amdgpu_dpm_post_set_power_state(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (adev->powerplay.pp_funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force a low level for thermal protection */
			amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
			/* remember the user's level to restore later */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise keep the user-selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}
}
2267
2268void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
2269{
2270 if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
2271
2272 mutex_lock(&adev->pm.mutex);
2273 amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
2274 mutex_unlock(&adev->pm.mutex);
2275 }
2276
2277 if (adev->asic_type == CHIP_STONEY &&
2278 adev->uvd.decode_image_width >= WIDTH_4K) {
2279 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
2280
2281 if (hwmgr && hwmgr->hwmgr_func &&
2282 hwmgr->hwmgr_func->update_nbdpm_pstate)
2283 hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
2284 !enable,
2285 true);
2286 }
2287}
2288
2289void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
2290{
2291 if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
2292
2293 mutex_lock(&adev->pm.mutex);
2294 amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
2295 mutex_unlock(&adev->pm.mutex);
2296 }
2297}
2298
2299void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
2300{
2301 int i;
2302
2303 if (adev->powerplay.pp_funcs->print_power_state == NULL)
2304 return;
2305
2306 for (i = 0; i < adev->pm.dpm.num_ps; i++)
2307 amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
2308
2309}
2310
/**
 * amdgpu_pm_sysfs_init - create the pm/dpm sysfs and hwmon interfaces
 * @adev: amdgpu device
 *
 * Registers the hwmon device and creates the power-management sysfs
 * files.  Safe to call repeatedly (early-returns once initialized) and a
 * no-op when dpm is disabled.  Returns 0 on success or a negative errno.
 */
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret;

	if (adev->pm.sysfs_initialized)
		return 0;

	if (adev->pm.dpm_enabled == 0)
		return 0;

	/* hwmon device with visibility filtered by hwmon_attributes_visible() */
	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
								   DRIVER_NAME, adev,
								   hwmon_groups);
	if (IS_ERR(adev->pm.int_hwmon_dev)) {
		ret = PTR_ERR(adev->pm.int_hwmon_dev);
		dev_err(adev->dev,
			"Unable to register hwmon device: %d\n", ret);
		return ret;
	}

	/* legacy dpm state / performance level controls */
	ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm state\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
	if (ret) {
		DRM_ERROR("failed to create device file for dpm state\n");
		return ret;
	}

	/* powerplay state table interface */
	ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
	if (ret) {
		DRM_ERROR("failed to create device file pp_num_states\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
	if (ret) {
		DRM_ERROR("failed to create device file pp_cur_state\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
	if (ret) {
		DRM_ERROR("failed to create device file pp_force_state\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_table);
	if (ret) {
		DRM_ERROR("failed to create device file pp_table\n");
		return ret;
	}

	/* per-clock-domain dpm level files; soc/dcef only exist on Vega10+,
	 * fclk only on Vega20+ */
	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
	if (ret) {
		DRM_ERROR("failed to create device file pp_dpm_sclk\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
	if (ret) {
		DRM_ERROR("failed to create device file pp_dpm_mclk\n");
		return ret;
	}
	if (adev->asic_type >= CHIP_VEGA10) {
		ret = device_create_file(adev->dev, &dev_attr_pp_dpm_socclk);
		if (ret) {
			DRM_ERROR("failed to create device file pp_dpm_socclk\n");
			return ret;
		}
		ret = device_create_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
		if (ret) {
			DRM_ERROR("failed to create device file pp_dpm_dcefclk\n");
			return ret;
		}
	}
	if (adev->asic_type >= CHIP_VEGA20) {
		ret = device_create_file(adev->dev, &dev_attr_pp_dpm_fclk);
		if (ret) {
			DRM_ERROR("failed to create device file pp_dpm_fclk\n");
			return ret;
		}
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
	if (ret) {
		DRM_ERROR("failed to create device file pp_dpm_pcie\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
	if (ret) {
		DRM_ERROR("failed to create device file pp_sclk_od\n");
		return ret;
	}
	ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od);
	if (ret) {
		DRM_ERROR("failed to create device file pp_mclk_od\n");
		return ret;
	}
	ret = device_create_file(adev->dev,
			&dev_attr_pp_power_profile_mode);
	if (ret) {
		DRM_ERROR("failed to create device file	"
				"pp_power_profile_mode\n");
		return ret;
	}
	/* overdrive clock/voltage control, only when enabled by the hwmgr */
	if (hwmgr->od_enabled) {
		ret = device_create_file(adev->dev,
				&dev_attr_pp_od_clk_voltage);
		if (ret) {
			DRM_ERROR("failed to create device file	"
					"pp_od_clk_voltage\n");
			return ret;
		}
	}
	ret = device_create_file(adev->dev,
			&dev_attr_gpu_busy_percent);
	if (ret) {
		DRM_ERROR("failed to create device file	"
				"gpu_busy_level\n");
		return ret;
	}
	/* PCIe bandwidth counters are not exposed on APUs */
	if (!(adev->flags & AMD_IS_APU)) {
		ret = device_create_file(adev->dev, &dev_attr_pcie_bw);
		if (ret) {
			DRM_ERROR("failed to create device file pcie_bw\n");
			return ret;
		}
	}
	ret = amdgpu_debugfs_pm_init(adev);
	if (ret) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
		return ret;
	}

	/* ppfeatures control, Vega10+ dGPUs only */
	if ((adev->asic_type >= CHIP_VEGA10) &&
	    !(adev->flags & AMD_IS_APU)) {
		ret = device_create_file(adev->dev,
				&dev_attr_ppfeatures);
		if (ret) {
			DRM_ERROR("failed to create device file	"
					"ppfeatures\n");
			return ret;
		}
	}

	adev->pm.sysfs_initialized = true;

	return 0;
}
2461
/**
 * amdgpu_pm_sysfs_fini - tear down the PM sysfs interface
 * @adev: amdgpu device handle
 *
 * Unregisters the hwmon device and removes every sysfs attribute that
 * amdgpu_pm_sysfs_init() created.  The conditions guarding each removal
 * (asic generation, APU flag, overdrive support) must mirror the ones
 * used at creation time so that exactly the registered files are removed.
 */
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	/* Nothing was registered if DPM never came up. */
	if (adev->pm.dpm_enabled == 0)
		return;

	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);
	device_remove_file(adev->dev, &dev_attr_power_dpm_state);
	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);

	device_remove_file(adev->dev, &dev_attr_pp_num_states);
	device_remove_file(adev->dev, &dev_attr_pp_cur_state);
	device_remove_file(adev->dev, &dev_attr_pp_force_state);
	device_remove_file(adev->dev, &dev_attr_pp_table);

	device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
	device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
	/* socclk/dcefclk attributes exist only on Vega10 and newer. */
	if (adev->asic_type >= CHIP_VEGA10) {
		device_remove_file(adev->dev, &dev_attr_pp_dpm_socclk);
		device_remove_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
	}
	device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
	/* fclk attribute exists only on Vega20 and newer. */
	if (adev->asic_type >= CHIP_VEGA20)
		device_remove_file(adev->dev, &dev_attr_pp_dpm_fclk);
	device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
	device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
	device_remove_file(adev->dev,
			   &dev_attr_pp_power_profile_mode);
	/* Overdrive attribute was created only when hwmgr reported support. */
	if (hwmgr->od_enabled)
		device_remove_file(adev->dev,
				&dev_attr_pp_od_clk_voltage);
	device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
	/* pcie_bw is not exposed on APUs (no dedicated PCIe link to measure). */
	if (!(adev->flags & AMD_IS_APU))
		device_remove_file(adev->dev, &dev_attr_pcie_bw);
	if ((adev->asic_type >= CHIP_VEGA10) &&
	    !(adev->flags & AMD_IS_APU))
		device_remove_file(adev->dev, &dev_attr_ppfeatures);
}
2502
/**
 * amdgpu_pm_compute_clocks - re-evaluate power state after a display change
 * @adev: amdgpu device handle
 *
 * Called when the display configuration changes (mode set, CRTC
 * enable/disable).  Recomputes display bandwidth, waits for all rings to
 * idle, then hands the new display configuration to the DPM backend so it
 * can pick an appropriate power state.
 */
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	int i = 0;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	/* Drain all rings before a power-state change so in-flight work is
	 * not affected by clock/voltage switching.
	 */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	/* NOTE(review): pp_funcs is dereferenced without a NULL check here;
	 * presumably dpm_enabled implies a registered powerplay backend —
	 * confirm before relying on this path for new IP blocks.
	 */
	if (adev->powerplay.pp_funcs->dispatch_tasks) {
		/* powerplay path: without DC, gather the display state
		 * ourselves and push it to the hwmgr under pm.mutex.
		 */
		if (!amdgpu_device_has_dc_support(adev)) {
			mutex_lock(&adev->pm.mutex);
			amdgpu_dpm_get_active_displays(adev);
			adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
			adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
			adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
			/* High-refresh panels (>120 Hz): report no usable
			 * vblank time for mclk switching.
			 */
			if (adev->pm.pm_display_cfg.vrefresh > 120)
				adev->pm.pm_display_cfg.min_vblank_time = 0;
			if (adev->powerplay.pp_funcs->display_configuration_change)
				adev->powerplay.pp_funcs->display_configuration_change(
								adev->powerplay.pp_handle,
								&adev->pm.pm_display_cfg);
			mutex_unlock(&adev->pm.mutex);
		}
		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
	} else {
		/* legacy dpm path */
		mutex_lock(&adev->pm.mutex);
		amdgpu_dpm_get_active_displays(adev);
		amdgpu_dpm_change_power_state_locked(adev);
		mutex_unlock(&adev->pm.mutex);
	}
}
2543
2544
2545
2546
2547#if defined(CONFIG_DEBUG_FS)
2548
2549static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
2550{
2551 uint32_t value;
2552 uint64_t value64;
2553 uint32_t query = 0;
2554 int size;
2555
2556
2557 if (!(adev->powerplay.pp_funcs &&
2558 adev->powerplay.pp_funcs->read_sensor))
2559 return -EINVAL;
2560
2561
2562 size = sizeof(value);
2563 seq_printf(m, "GFX Clocks and Power:\n");
2564 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
2565 seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
2566 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
2567 seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
2568 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
2569 seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
2570 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
2571 seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
2572 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
2573 seq_printf(m, "\t%u mV (VDDGFX)\n", value);
2574 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
2575 seq_printf(m, "\t%u mV (VDDNB)\n", value);
2576 size = sizeof(uint32_t);
2577 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
2578 seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
2579 size = sizeof(value);
2580 seq_printf(m, "\n");
2581
2582
2583 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
2584 seq_printf(m, "GPU Temperature: %u C\n", value/1000);
2585
2586
2587 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
2588 seq_printf(m, "GPU Load: %u %%\n", value);
2589 seq_printf(m, "\n");
2590
2591
2592 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
2593 seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
2594
2595
2596 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
2597 if (!value) {
2598 seq_printf(m, "UVD: Disabled\n");
2599 } else {
2600 seq_printf(m, "UVD: Enabled\n");
2601 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
2602 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
2603 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
2604 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
2605 }
2606 }
2607 seq_printf(m, "\n");
2608
2609
2610 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
2611 if (!value) {
2612 seq_printf(m, "VCE: Disabled\n");
2613 } else {
2614 seq_printf(m, "VCE: Enabled\n");
2615 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
2616 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
2617 }
2618 }
2619
2620 return 0;
2621}
2622
2623static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
2624{
2625 int i;
2626
2627 for (i = 0; clocks[i].flag; i++)
2628 seq_printf(m, "\t%s: %s\n", clocks[i].name,
2629 (flags & clocks[i].flag) ? "On" : "Off");
2630}
2631
2632static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
2633{
2634 struct drm_info_node *node = (struct drm_info_node *) m->private;
2635 struct drm_device *dev = node->minor->dev;
2636 struct amdgpu_device *adev = dev->dev_private;
2637 struct drm_device *ddev = adev->ddev;
2638 u32 flags = 0;
2639
2640 amdgpu_device_ip_get_clockgating_state(adev, &flags);
2641 seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
2642 amdgpu_parse_cg_state(m, flags);
2643 seq_printf(m, "\n");
2644
2645 if (!adev->pm.dpm_enabled) {
2646 seq_printf(m, "dpm not enabled\n");
2647 return 0;
2648 }
2649 if ((adev->flags & AMD_IS_PX) &&
2650 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
2651 seq_printf(m, "PX asic powered off\n");
2652 } else if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
2653 mutex_lock(&adev->pm.mutex);
2654 if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
2655 adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
2656 else
2657 seq_printf(m, "Debugfs support not implemented for this asic\n");
2658 mutex_unlock(&adev->pm.mutex);
2659 } else {
2660 return amdgpu_debugfs_pm_info_pp(m, adev);
2661 }
2662
2663 return 0;
2664}
2665
/* debugfs entries registered by amdgpu_debugfs_pm_init() */
static const struct drm_info_list amdgpu_pm_info_list[] = {
	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
2669#endif
2670
/*
 * Register the amdgpu_pm_info debugfs file for @adev.
 *
 * Returns the result of amdgpu_debugfs_add_files(), or 0 when debugfs
 * support is compiled out (nothing to register).
 */
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
#else
	return 0;
#endif
}
2679