1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26#include <drm/drm_debugfs.h>
27
28#include "amdgpu.h"
29#include "amdgpu_drv.h"
30#include "amdgpu_pm.h"
31#include "amdgpu_dpm.h"
32#include "amdgpu_display.h"
33#include "amdgpu_smu.h"
34#include "atom.h"
35#include <linux/power_supply.h>
36#include <linux/pci.h>
37#include <linux/hwmon.h>
38#include <linux/hwmon-sysfs.h>
39#include <linux/nospec.h>
40#include "hwmgr.h"
41#define WIDTH_4K 3840
42
43static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
44
45static const struct cg_flag_name clocks[] = {
46 {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
47 {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
48 {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
49 {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
50 {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
51 {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
52 {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
53 {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
54 {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
55 {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
56 {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
57 {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
58 {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
59 {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
60 {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
61 {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
62 {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
63 {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
64 {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
65 {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
66 {AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"},
67 {AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
68 {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
69 {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
70
71 {AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
72 {AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
73 {0, NULL},
74};
75
76static const struct hwmon_temp_label {
77 enum PP_HWMON_TEMP channel;
78 const char *label;
79} temp_label[] = {
80 {PP_TEMP_EDGE, "edge"},
81 {PP_TEMP_JUNCTION, "junction"},
82 {PP_TEMP_MEM, "mem"},
83};
84
85void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
86{
87 if (adev->pm.dpm_enabled) {
88 mutex_lock(&adev->pm.mutex);
89 if (power_supply_is_system_supplied() > 0)
90 adev->pm.ac_power = true;
91 else
92 adev->pm.ac_power = false;
93 if (adev->powerplay.pp_funcs->enable_bapm)
94 amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
95 mutex_unlock(&adev->pm.mutex);
96 }
97}
98
99int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
100 void *data, uint32_t *size)
101{
102 int ret = 0;
103
104 if (!data || !size)
105 return -EINVAL;
106
107 if (is_support_sw_smu(adev))
108 ret = smu_read_sensor(&adev->smu, sensor, data, size);
109 else {
110 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
111 ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle,
112 sensor, data, size);
113 else
114 ret = -EINVAL;
115 }
116
117 return ret;
118}
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154static ssize_t amdgpu_get_dpm_state(struct device *dev,
155 struct device_attribute *attr,
156 char *buf)
157{
158 struct drm_device *ddev = dev_get_drvdata(dev);
159 struct amdgpu_device *adev = ddev->dev_private;
160 enum amd_pm_state_type pm;
161
162 if (is_support_sw_smu(adev)) {
163 if (adev->smu.ppt_funcs->get_current_power_state)
164 pm = amdgpu_smu_get_current_power_state(adev);
165 else
166 pm = adev->pm.dpm.user_state;
167 } else if (adev->powerplay.pp_funcs->get_current_power_state) {
168 pm = amdgpu_dpm_get_current_power_state(adev);
169 } else {
170 pm = adev->pm.dpm.user_state;
171 }
172
173 return snprintf(buf, PAGE_SIZE, "%s\n",
174 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
175 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
176}
177
178static ssize_t amdgpu_set_dpm_state(struct device *dev,
179 struct device_attribute *attr,
180 const char *buf,
181 size_t count)
182{
183 struct drm_device *ddev = dev_get_drvdata(dev);
184 struct amdgpu_device *adev = ddev->dev_private;
185 enum amd_pm_state_type state;
186
187 if (strncmp("battery", buf, strlen("battery")) == 0)
188 state = POWER_STATE_TYPE_BATTERY;
189 else if (strncmp("balanced", buf, strlen("balanced")) == 0)
190 state = POWER_STATE_TYPE_BALANCED;
191 else if (strncmp("performance", buf, strlen("performance")) == 0)
192 state = POWER_STATE_TYPE_PERFORMANCE;
193 else {
194 count = -EINVAL;
195 goto fail;
196 }
197
198 if (is_support_sw_smu(adev)) {
199 mutex_lock(&adev->pm.mutex);
200 adev->pm.dpm.user_state = state;
201 mutex_unlock(&adev->pm.mutex);
202 } else if (adev->powerplay.pp_funcs->dispatch_tasks) {
203 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
204 } else {
205 mutex_lock(&adev->pm.mutex);
206 adev->pm.dpm.user_state = state;
207 mutex_unlock(&adev->pm.mutex);
208
209
210 if (!(adev->flags & AMD_IS_PX) ||
211 (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
212 amdgpu_pm_compute_clocks(adev);
213 }
214fail:
215 return count;
216}
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
279 struct device_attribute *attr,
280 char *buf)
281{
282 struct drm_device *ddev = dev_get_drvdata(dev);
283 struct amdgpu_device *adev = ddev->dev_private;
284 enum amd_dpm_forced_level level = 0xff;
285
286 if (amdgpu_sriov_vf(adev))
287 return 0;
288
289 if ((adev->flags & AMD_IS_PX) &&
290 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
291 return snprintf(buf, PAGE_SIZE, "off\n");
292
293 if (is_support_sw_smu(adev))
294 level = smu_get_performance_level(&adev->smu);
295 else if (adev->powerplay.pp_funcs->get_performance_level)
296 level = amdgpu_dpm_get_performance_level(adev);
297 else
298 level = adev->pm.dpm.forced_level;
299
300 return snprintf(buf, PAGE_SIZE, "%s\n",
301 (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
302 (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
303 (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
304 (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
305 (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
306 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
307 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
308 (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
309 "unknown");
310}
311
312static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
313 struct device_attribute *attr,
314 const char *buf,
315 size_t count)
316{
317 struct drm_device *ddev = dev_get_drvdata(dev);
318 struct amdgpu_device *adev = ddev->dev_private;
319 enum amd_dpm_forced_level level;
320 enum amd_dpm_forced_level current_level = 0xff;
321 int ret = 0;
322
323
324 if ((adev->flags & AMD_IS_PX) &&
325 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
326 return -EINVAL;
327
328 if (!amdgpu_sriov_vf(adev)) {
329 if (is_support_sw_smu(adev))
330 current_level = smu_get_performance_level(&adev->smu);
331 else if (adev->powerplay.pp_funcs->get_performance_level)
332 current_level = amdgpu_dpm_get_performance_level(adev);
333 }
334
335 if (strncmp("low", buf, strlen("low")) == 0) {
336 level = AMD_DPM_FORCED_LEVEL_LOW;
337 } else if (strncmp("high", buf, strlen("high")) == 0) {
338 level = AMD_DPM_FORCED_LEVEL_HIGH;
339 } else if (strncmp("auto", buf, strlen("auto")) == 0) {
340 level = AMD_DPM_FORCED_LEVEL_AUTO;
341 } else if (strncmp("manual", buf, strlen("manual")) == 0) {
342 level = AMD_DPM_FORCED_LEVEL_MANUAL;
343 } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
344 level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
345 } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
346 level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
347 } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
348 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
349 } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
350 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
351 } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
352 level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
353 } else {
354 count = -EINVAL;
355 goto fail;
356 }
357
358 if (amdgpu_sriov_vf(adev)) {
359 if (amdgim_is_hwperf(adev) &&
360 adev->virt.ops->force_dpm_level) {
361 mutex_lock(&adev->pm.mutex);
362 adev->virt.ops->force_dpm_level(adev, level);
363 mutex_unlock(&adev->pm.mutex);
364 return count;
365 } else {
366 return -EINVAL;
367 }
368 }
369
370 if (current_level == level)
371 return count;
372
373
374 if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
375 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
376 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
377 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
378 (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
379 pr_err("Currently not in any profile mode!\n");
380 return -EINVAL;
381 }
382
383 if (is_support_sw_smu(adev)) {
384 ret = smu_force_performance_level(&adev->smu, level);
385 if (ret)
386 count = -EINVAL;
387 } else if (adev->powerplay.pp_funcs->force_performance_level) {
388 mutex_lock(&adev->pm.mutex);
389 if (adev->pm.dpm.thermal_active) {
390 count = -EINVAL;
391 mutex_unlock(&adev->pm.mutex);
392 goto fail;
393 }
394 ret = amdgpu_dpm_force_performance_level(adev, level);
395 if (ret)
396 count = -EINVAL;
397 else
398 adev->pm.dpm.forced_level = level;
399 mutex_unlock(&adev->pm.mutex);
400 }
401
402fail:
403 return count;
404}
405
406static ssize_t amdgpu_get_pp_num_states(struct device *dev,
407 struct device_attribute *attr,
408 char *buf)
409{
410 struct drm_device *ddev = dev_get_drvdata(dev);
411 struct amdgpu_device *adev = ddev->dev_private;
412 struct pp_states_info data;
413 int i, buf_len, ret;
414
415 if (is_support_sw_smu(adev)) {
416 ret = smu_get_power_num_states(&adev->smu, &data);
417 if (ret)
418 return ret;
419 } else if (adev->powerplay.pp_funcs->get_pp_num_states)
420 amdgpu_dpm_get_pp_num_states(adev, &data);
421
422 buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
423 for (i = 0; i < data.nums; i++)
424 buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
425 (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
426 (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
427 (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
428 (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");
429
430 return buf_len;
431}
432
433static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
434 struct device_attribute *attr,
435 char *buf)
436{
437 struct drm_device *ddev = dev_get_drvdata(dev);
438 struct amdgpu_device *adev = ddev->dev_private;
439 struct pp_states_info data;
440 struct smu_context *smu = &adev->smu;
441 enum amd_pm_state_type pm = 0;
442 int i = 0, ret = 0;
443
444 if (is_support_sw_smu(adev)) {
445 pm = smu_get_current_power_state(smu);
446 ret = smu_get_power_num_states(smu, &data);
447 if (ret)
448 return ret;
449 } else if (adev->powerplay.pp_funcs->get_current_power_state
450 && adev->powerplay.pp_funcs->get_pp_num_states) {
451 pm = amdgpu_dpm_get_current_power_state(adev);
452 amdgpu_dpm_get_pp_num_states(adev, &data);
453 }
454
455 for (i = 0; i < data.nums; i++) {
456 if (pm == data.states[i])
457 break;
458 }
459
460 if (i == data.nums)
461 i = -EINVAL;
462
463 return snprintf(buf, PAGE_SIZE, "%d\n", i);
464}
465
466static ssize_t amdgpu_get_pp_force_state(struct device *dev,
467 struct device_attribute *attr,
468 char *buf)
469{
470 struct drm_device *ddev = dev_get_drvdata(dev);
471 struct amdgpu_device *adev = ddev->dev_private;
472
473 if (adev->pp_force_state_enabled)
474 return amdgpu_get_pp_cur_state(dev, attr, buf);
475 else
476 return snprintf(buf, PAGE_SIZE, "\n");
477}
478
479static ssize_t amdgpu_set_pp_force_state(struct device *dev,
480 struct device_attribute *attr,
481 const char *buf,
482 size_t count)
483{
484 struct drm_device *ddev = dev_get_drvdata(dev);
485 struct amdgpu_device *adev = ddev->dev_private;
486 enum amd_pm_state_type state = 0;
487 unsigned long idx;
488 int ret;
489
490 if (strlen(buf) == 1)
491 adev->pp_force_state_enabled = false;
492 else if (is_support_sw_smu(adev))
493 adev->pp_force_state_enabled = false;
494 else if (adev->powerplay.pp_funcs->dispatch_tasks &&
495 adev->powerplay.pp_funcs->get_pp_num_states) {
496 struct pp_states_info data;
497
498 ret = kstrtoul(buf, 0, &idx);
499 if (ret || idx >= ARRAY_SIZE(data.states)) {
500 count = -EINVAL;
501 goto fail;
502 }
503 idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
504
505 amdgpu_dpm_get_pp_num_states(adev, &data);
506 state = data.states[idx];
507
508 if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
509 state != POWER_STATE_TYPE_DEFAULT) {
510 amdgpu_dpm_dispatch_task(adev,
511 AMD_PP_TASK_ENABLE_USER_STATE, &state);
512 adev->pp_force_state_enabled = true;
513 }
514 }
515fail:
516 return count;
517}
518
519
520
521
522
523
524
525
526
527
528
529
530static ssize_t amdgpu_get_pp_table(struct device *dev,
531 struct device_attribute *attr,
532 char *buf)
533{
534 struct drm_device *ddev = dev_get_drvdata(dev);
535 struct amdgpu_device *adev = ddev->dev_private;
536 char *table = NULL;
537 int size;
538
539 if (is_support_sw_smu(adev)) {
540 size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
541 if (size < 0)
542 return size;
543 }
544 else if (adev->powerplay.pp_funcs->get_pp_table)
545 size = amdgpu_dpm_get_pp_table(adev, &table);
546 else
547 return 0;
548
549 if (size >= PAGE_SIZE)
550 size = PAGE_SIZE - 1;
551
552 memcpy(buf, table, size);
553
554 return size;
555}
556
557static ssize_t amdgpu_set_pp_table(struct device *dev,
558 struct device_attribute *attr,
559 const char *buf,
560 size_t count)
561{
562 struct drm_device *ddev = dev_get_drvdata(dev);
563 struct amdgpu_device *adev = ddev->dev_private;
564 int ret = 0;
565
566 if (is_support_sw_smu(adev)) {
567 ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
568 if (ret)
569 return ret;
570 } else if (adev->powerplay.pp_funcs->set_pp_table)
571 amdgpu_dpm_set_pp_table(adev, buf, count);
572
573 return count;
574}
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
643 struct device_attribute *attr,
644 const char *buf,
645 size_t count)
646{
647 struct drm_device *ddev = dev_get_drvdata(dev);
648 struct amdgpu_device *adev = ddev->dev_private;
649 int ret;
650 uint32_t parameter_size = 0;
651 long parameter[64];
652 char buf_cpy[128];
653 char *tmp_str;
654 char *sub_str;
655 const char delimiter[3] = {' ', '\n', '\0'};
656 uint32_t type;
657
658 if (count > 127)
659 return -EINVAL;
660
661 if (*buf == 's')
662 type = PP_OD_EDIT_SCLK_VDDC_TABLE;
663 else if (*buf == 'm')
664 type = PP_OD_EDIT_MCLK_VDDC_TABLE;
665 else if(*buf == 'r')
666 type = PP_OD_RESTORE_DEFAULT_TABLE;
667 else if (*buf == 'c')
668 type = PP_OD_COMMIT_DPM_TABLE;
669 else if (!strncmp(buf, "vc", 2))
670 type = PP_OD_EDIT_VDDC_CURVE;
671 else
672 return -EINVAL;
673
674 memcpy(buf_cpy, buf, count+1);
675
676 tmp_str = buf_cpy;
677
678 if (type == PP_OD_EDIT_VDDC_CURVE)
679 tmp_str++;
680 while (isspace(*++tmp_str));
681
682 while (tmp_str[0]) {
683 sub_str = strsep(&tmp_str, delimiter);
684 ret = kstrtol(sub_str, 0, ¶meter[parameter_size]);
685 if (ret)
686 return -EINVAL;
687 parameter_size++;
688
689 while (isspace(*tmp_str))
690 tmp_str++;
691 }
692
693 if (is_support_sw_smu(adev)) {
694 ret = smu_od_edit_dpm_table(&adev->smu, type,
695 parameter, parameter_size);
696
697 if (ret)
698 return -EINVAL;
699 } else {
700 if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
701 ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
702 parameter, parameter_size);
703 if (ret)
704 return -EINVAL;
705 }
706
707 if (type == PP_OD_COMMIT_DPM_TABLE) {
708 if (adev->powerplay.pp_funcs->dispatch_tasks) {
709 amdgpu_dpm_dispatch_task(adev,
710 AMD_PP_TASK_READJUST_POWER_STATE,
711 NULL);
712 return count;
713 } else {
714 return -EINVAL;
715 }
716 }
717 }
718
719 return count;
720}
721
722static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
723 struct device_attribute *attr,
724 char *buf)
725{
726 struct drm_device *ddev = dev_get_drvdata(dev);
727 struct amdgpu_device *adev = ddev->dev_private;
728 uint32_t size = 0;
729
730 if (is_support_sw_smu(adev)) {
731 size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
732 size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
733 size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size);
734 size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size);
735 return size;
736 } else if (adev->powerplay.pp_funcs->print_clock_levels) {
737 size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
738 size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
739 size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
740 size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
741 return size;
742 } else {
743 return snprintf(buf, PAGE_SIZE, "\n");
744 }
745
746}
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764static ssize_t amdgpu_set_ppfeature_status(struct device *dev,
765 struct device_attribute *attr,
766 const char *buf,
767 size_t count)
768{
769 struct drm_device *ddev = dev_get_drvdata(dev);
770 struct amdgpu_device *adev = ddev->dev_private;
771 uint64_t featuremask;
772 int ret;
773
774 ret = kstrtou64(buf, 0, &featuremask);
775 if (ret)
776 return -EINVAL;
777
778 pr_debug("featuremask = 0x%llx\n", featuremask);
779
780 if (is_support_sw_smu(adev)) {
781 ret = smu_set_ppfeature_status(&adev->smu, featuremask);
782 if (ret)
783 return -EINVAL;
784 } else if (adev->powerplay.pp_funcs->set_ppfeature_status) {
785 ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
786 if (ret)
787 return -EINVAL;
788 }
789
790 return count;
791}
792
793static ssize_t amdgpu_get_ppfeature_status(struct device *dev,
794 struct device_attribute *attr,
795 char *buf)
796{
797 struct drm_device *ddev = dev_get_drvdata(dev);
798 struct amdgpu_device *adev = ddev->dev_private;
799
800 if (is_support_sw_smu(adev)) {
801 return smu_get_ppfeature_status(&adev->smu, buf);
802 } else if (adev->powerplay.pp_funcs->get_ppfeature_status)
803 return amdgpu_dpm_get_ppfeature_status(adev, buf);
804
805 return snprintf(buf, PAGE_SIZE, "\n");
806}
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
834 struct device_attribute *attr,
835 char *buf)
836{
837 struct drm_device *ddev = dev_get_drvdata(dev);
838 struct amdgpu_device *adev = ddev->dev_private;
839
840 if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
841 adev->virt.ops->get_pp_clk)
842 return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
843
844 if (is_support_sw_smu(adev))
845 return smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
846 else if (adev->powerplay.pp_funcs->print_clock_levels)
847 return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
848 else
849 return snprintf(buf, PAGE_SIZE, "\n");
850}
851
852
853
854
855
856#define AMDGPU_MASK_BUF_MAX (32 * 13)
857
858static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
859{
860 int ret;
861 long level;
862 char *sub_str = NULL;
863 char *tmp;
864 char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
865 const char delimiter[3] = {' ', '\n', '\0'};
866 size_t bytes;
867
868 *mask = 0;
869
870 bytes = min(count, sizeof(buf_cpy) - 1);
871 memcpy(buf_cpy, buf, bytes);
872 buf_cpy[bytes] = '\0';
873 tmp = buf_cpy;
874 while (tmp[0]) {
875 sub_str = strsep(&tmp, delimiter);
876 if (strlen(sub_str)) {
877 ret = kstrtol(sub_str, 0, &level);
878 if (ret)
879 return -EINVAL;
880 *mask |= 1 << level;
881 } else
882 break;
883 }
884
885 return 0;
886}
887
888static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
889 struct device_attribute *attr,
890 const char *buf,
891 size_t count)
892{
893 struct drm_device *ddev = dev_get_drvdata(dev);
894 struct amdgpu_device *adev = ddev->dev_private;
895 int ret;
896 uint32_t mask = 0;
897
898 if (amdgpu_sriov_vf(adev))
899 return 0;
900
901 ret = amdgpu_read_mask(buf, count, &mask);
902 if (ret)
903 return ret;
904
905 if (is_support_sw_smu(adev))
906 ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);
907 else if (adev->powerplay.pp_funcs->force_clock_level)
908 ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
909
910 if (ret)
911 return -EINVAL;
912
913 return count;
914}
915
916static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
917 struct device_attribute *attr,
918 char *buf)
919{
920 struct drm_device *ddev = dev_get_drvdata(dev);
921 struct amdgpu_device *adev = ddev->dev_private;
922
923 if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
924 adev->virt.ops->get_pp_clk)
925 return adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
926
927 if (is_support_sw_smu(adev))
928 return smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
929 else if (adev->powerplay.pp_funcs->print_clock_levels)
930 return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
931 else
932 return snprintf(buf, PAGE_SIZE, "\n");
933}
934
935static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
936 struct device_attribute *attr,
937 const char *buf,
938 size_t count)
939{
940 struct drm_device *ddev = dev_get_drvdata(dev);
941 struct amdgpu_device *adev = ddev->dev_private;
942 int ret;
943 uint32_t mask = 0;
944
945 if (amdgpu_sriov_vf(adev))
946 return 0;
947
948 ret = amdgpu_read_mask(buf, count, &mask);
949 if (ret)
950 return ret;
951
952 if (is_support_sw_smu(adev))
953 ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask);
954 else if (adev->powerplay.pp_funcs->force_clock_level)
955 ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
956
957 if (ret)
958 return -EINVAL;
959
960 return count;
961}
962
963static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
964 struct device_attribute *attr,
965 char *buf)
966{
967 struct drm_device *ddev = dev_get_drvdata(dev);
968 struct amdgpu_device *adev = ddev->dev_private;
969
970 if (is_support_sw_smu(adev))
971 return smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
972 else if (adev->powerplay.pp_funcs->print_clock_levels)
973 return amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
974 else
975 return snprintf(buf, PAGE_SIZE, "\n");
976}
977
978static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
979 struct device_attribute *attr,
980 const char *buf,
981 size_t count)
982{
983 struct drm_device *ddev = dev_get_drvdata(dev);
984 struct amdgpu_device *adev = ddev->dev_private;
985 int ret;
986 uint32_t mask = 0;
987
988 ret = amdgpu_read_mask(buf, count, &mask);
989 if (ret)
990 return ret;
991
992 if (is_support_sw_smu(adev))
993 ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask);
994 else if (adev->powerplay.pp_funcs->force_clock_level)
995 ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
996
997 if (ret)
998 return -EINVAL;
999
1000 return count;
1001}
1002
1003static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
1004 struct device_attribute *attr,
1005 char *buf)
1006{
1007 struct drm_device *ddev = dev_get_drvdata(dev);
1008 struct amdgpu_device *adev = ddev->dev_private;
1009
1010 if (is_support_sw_smu(adev))
1011 return smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
1012 else if (adev->powerplay.pp_funcs->print_clock_levels)
1013 return amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
1014 else
1015 return snprintf(buf, PAGE_SIZE, "\n");
1016}
1017
1018static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
1019 struct device_attribute *attr,
1020 const char *buf,
1021 size_t count)
1022{
1023 struct drm_device *ddev = dev_get_drvdata(dev);
1024 struct amdgpu_device *adev = ddev->dev_private;
1025 int ret;
1026 uint32_t mask = 0;
1027
1028 ret = amdgpu_read_mask(buf, count, &mask);
1029 if (ret)
1030 return ret;
1031
1032 if (is_support_sw_smu(adev))
1033 ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask);
1034 else if (adev->powerplay.pp_funcs->force_clock_level)
1035 ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
1036
1037 if (ret)
1038 return -EINVAL;
1039
1040 return count;
1041}
1042
1043static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
1044 struct device_attribute *attr,
1045 char *buf)
1046{
1047 struct drm_device *ddev = dev_get_drvdata(dev);
1048 struct amdgpu_device *adev = ddev->dev_private;
1049
1050 if (is_support_sw_smu(adev))
1051 return smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
1052 else if (adev->powerplay.pp_funcs->print_clock_levels)
1053 return amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
1054 else
1055 return snprintf(buf, PAGE_SIZE, "\n");
1056}
1057
1058static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
1059 struct device_attribute *attr,
1060 const char *buf,
1061 size_t count)
1062{
1063 struct drm_device *ddev = dev_get_drvdata(dev);
1064 struct amdgpu_device *adev = ddev->dev_private;
1065 int ret;
1066 uint32_t mask = 0;
1067
1068 ret = amdgpu_read_mask(buf, count, &mask);
1069 if (ret)
1070 return ret;
1071
1072 if (is_support_sw_smu(adev))
1073 ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask);
1074 else if (adev->powerplay.pp_funcs->force_clock_level)
1075 ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
1076
1077 if (ret)
1078 return -EINVAL;
1079
1080 return count;
1081}
1082
1083static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
1084 struct device_attribute *attr,
1085 char *buf)
1086{
1087 struct drm_device *ddev = dev_get_drvdata(dev);
1088 struct amdgpu_device *adev = ddev->dev_private;
1089
1090 if (is_support_sw_smu(adev))
1091 return smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
1092 else if (adev->powerplay.pp_funcs->print_clock_levels)
1093 return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
1094 else
1095 return snprintf(buf, PAGE_SIZE, "\n");
1096}
1097
1098static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
1099 struct device_attribute *attr,
1100 const char *buf,
1101 size_t count)
1102{
1103 struct drm_device *ddev = dev_get_drvdata(dev);
1104 struct amdgpu_device *adev = ddev->dev_private;
1105 int ret;
1106 uint32_t mask = 0;
1107
1108 ret = amdgpu_read_mask(buf, count, &mask);
1109 if (ret)
1110 return ret;
1111
1112 if (is_support_sw_smu(adev))
1113 ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask);
1114 else if (adev->powerplay.pp_funcs->force_clock_level)
1115 ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
1116
1117 if (ret)
1118 return -EINVAL;
1119
1120 return count;
1121}
1122
1123static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
1124 struct device_attribute *attr,
1125 char *buf)
1126{
1127 struct drm_device *ddev = dev_get_drvdata(dev);
1128 struct amdgpu_device *adev = ddev->dev_private;
1129 uint32_t value = 0;
1130
1131 if (is_support_sw_smu(adev))
1132 value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK);
1133 else if (adev->powerplay.pp_funcs->get_sclk_od)
1134 value = amdgpu_dpm_get_sclk_od(adev);
1135
1136 return snprintf(buf, PAGE_SIZE, "%d\n", value);
1137}
1138
1139static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
1140 struct device_attribute *attr,
1141 const char *buf,
1142 size_t count)
1143{
1144 struct drm_device *ddev = dev_get_drvdata(dev);
1145 struct amdgpu_device *adev = ddev->dev_private;
1146 int ret;
1147 long int value;
1148
1149 ret = kstrtol(buf, 0, &value);
1150
1151 if (ret) {
1152 count = -EINVAL;
1153 goto fail;
1154 }
1155
1156 if (is_support_sw_smu(adev)) {
1157 value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value);
1158 } else {
1159 if (adev->powerplay.pp_funcs->set_sclk_od)
1160 amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
1161
1162 if (adev->powerplay.pp_funcs->dispatch_tasks) {
1163 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
1164 } else {
1165 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1166 amdgpu_pm_compute_clocks(adev);
1167 }
1168 }
1169
1170fail:
1171 return count;
1172}
1173
1174static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
1175 struct device_attribute *attr,
1176 char *buf)
1177{
1178 struct drm_device *ddev = dev_get_drvdata(dev);
1179 struct amdgpu_device *adev = ddev->dev_private;
1180 uint32_t value = 0;
1181
1182 if (is_support_sw_smu(adev))
1183 value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK);
1184 else if (adev->powerplay.pp_funcs->get_mclk_od)
1185 value = amdgpu_dpm_get_mclk_od(adev);
1186
1187 return snprintf(buf, PAGE_SIZE, "%d\n", value);
1188}
1189
1190static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
1191 struct device_attribute *attr,
1192 const char *buf,
1193 size_t count)
1194{
1195 struct drm_device *ddev = dev_get_drvdata(dev);
1196 struct amdgpu_device *adev = ddev->dev_private;
1197 int ret;
1198 long int value;
1199
1200 ret = kstrtol(buf, 0, &value);
1201
1202 if (ret) {
1203 count = -EINVAL;
1204 goto fail;
1205 }
1206
1207 if (is_support_sw_smu(adev)) {
1208 value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value);
1209 } else {
1210 if (adev->powerplay.pp_funcs->set_mclk_od)
1211 amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
1212
1213 if (adev->powerplay.pp_funcs->dispatch_tasks) {
1214 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
1215 } else {
1216 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1217 amdgpu_pm_compute_clocks(adev);
1218 }
1219 }
1220
1221fail:
1222 return count;
1223}
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
1246 struct device_attribute *attr,
1247 char *buf)
1248{
1249 struct drm_device *ddev = dev_get_drvdata(dev);
1250 struct amdgpu_device *adev = ddev->dev_private;
1251
1252 if (is_support_sw_smu(adev))
1253 return smu_get_power_profile_mode(&adev->smu, buf);
1254 else if (adev->powerplay.pp_funcs->get_power_profile_mode)
1255 return amdgpu_dpm_get_power_profile_mode(adev, buf);
1256
1257 return snprintf(buf, PAGE_SIZE, "\n");
1258}
1259
1260
1261static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
1262 struct device_attribute *attr,
1263 const char *buf,
1264 size_t count)
1265{
1266 int ret = 0xff;
1267 struct drm_device *ddev = dev_get_drvdata(dev);
1268 struct amdgpu_device *adev = ddev->dev_private;
1269 uint32_t parameter_size = 0;
1270 long parameter[64];
1271 char *sub_str, buf_cpy[128];
1272 char *tmp_str;
1273 uint32_t i = 0;
1274 char tmp[2];
1275 long int profile_mode = 0;
1276 const char delimiter[3] = {' ', '\n', '\0'};
1277
1278 tmp[0] = *(buf);
1279 tmp[1] = '\0';
1280 ret = kstrtol(tmp, 0, &profile_mode);
1281 if (ret)
1282 goto fail;
1283
1284 if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
1285 if (count < 2 || count > 127)
1286 return -EINVAL;
1287 while (isspace(*++buf))
1288 i++;
1289 memcpy(buf_cpy, buf, count-i);
1290 tmp_str = buf_cpy;
1291 while (tmp_str[0]) {
1292 sub_str = strsep(&tmp_str, delimiter);
1293 ret = kstrtol(sub_str, 0, ¶meter[parameter_size]);
1294 if (ret) {
1295 count = -EINVAL;
1296 goto fail;
1297 }
1298 parameter_size++;
1299 while (isspace(*tmp_str))
1300 tmp_str++;
1301 }
1302 }
1303 parameter[parameter_size] = profile_mode;
1304 if (is_support_sw_smu(adev))
1305 ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size);
1306 else if (adev->powerplay.pp_funcs->set_power_profile_mode)
1307 ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
1308 if (!ret)
1309 return count;
1310fail:
1311 return -EINVAL;
1312}
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322static ssize_t amdgpu_get_busy_percent(struct device *dev,
1323 struct device_attribute *attr,
1324 char *buf)
1325{
1326 struct drm_device *ddev = dev_get_drvdata(dev);
1327 struct amdgpu_device *adev = ddev->dev_private;
1328 int r, value, size = sizeof(value);
1329
1330
1331 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
1332 (void *)&value, &size);
1333
1334 if (r)
1335 return r;
1336
1337 return snprintf(buf, PAGE_SIZE, "%d\n", value);
1338}
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348static ssize_t amdgpu_get_memory_busy_percent(struct device *dev,
1349 struct device_attribute *attr,
1350 char *buf)
1351{
1352 struct drm_device *ddev = dev_get_drvdata(dev);
1353 struct amdgpu_device *adev = ddev->dev_private;
1354 int r, value, size = sizeof(value);
1355
1356
1357 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
1358 (void *)&value, &size);
1359
1360 if (r)
1361 return r;
1362
1363 return snprintf(buf, PAGE_SIZE, "%d\n", value);
1364}
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378static ssize_t amdgpu_get_pcie_bw(struct device *dev,
1379 struct device_attribute *attr,
1380 char *buf)
1381{
1382 struct drm_device *ddev = dev_get_drvdata(dev);
1383 struct amdgpu_device *adev = ddev->dev_private;
1384 uint64_t count0, count1;
1385
1386 amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
1387 return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n",
1388 count0, count1, pcie_get_mps(adev->pdev));
1389}
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401static ssize_t amdgpu_get_unique_id(struct device *dev,
1402 struct device_attribute *attr,
1403 char *buf)
1404{
1405 struct drm_device *ddev = dev_get_drvdata(dev);
1406 struct amdgpu_device *adev = ddev->dev_private;
1407
1408 if (adev->unique_id)
1409 return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);
1410
1411 return 0;
1412}
1413
1414static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
1415static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
1416 amdgpu_get_dpm_forced_performance_level,
1417 amdgpu_set_dpm_forced_performance_level);
1418static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL);
1419static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL);
1420static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR,
1421 amdgpu_get_pp_force_state,
1422 amdgpu_set_pp_force_state);
1423static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR,
1424 amdgpu_get_pp_table,
1425 amdgpu_set_pp_table);
1426static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
1427 amdgpu_get_pp_dpm_sclk,
1428 amdgpu_set_pp_dpm_sclk);
1429static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
1430 amdgpu_get_pp_dpm_mclk,
1431 amdgpu_set_pp_dpm_mclk);
1432static DEVICE_ATTR(pp_dpm_socclk, S_IRUGO | S_IWUSR,
1433 amdgpu_get_pp_dpm_socclk,
1434 amdgpu_set_pp_dpm_socclk);
1435static DEVICE_ATTR(pp_dpm_fclk, S_IRUGO | S_IWUSR,
1436 amdgpu_get_pp_dpm_fclk,
1437 amdgpu_set_pp_dpm_fclk);
1438static DEVICE_ATTR(pp_dpm_dcefclk, S_IRUGO | S_IWUSR,
1439 amdgpu_get_pp_dpm_dcefclk,
1440 amdgpu_set_pp_dpm_dcefclk);
1441static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
1442 amdgpu_get_pp_dpm_pcie,
1443 amdgpu_set_pp_dpm_pcie);
1444static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
1445 amdgpu_get_pp_sclk_od,
1446 amdgpu_set_pp_sclk_od);
1447static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
1448 amdgpu_get_pp_mclk_od,
1449 amdgpu_set_pp_mclk_od);
1450static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR,
1451 amdgpu_get_pp_power_profile_mode,
1452 amdgpu_set_pp_power_profile_mode);
1453static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR,
1454 amdgpu_get_pp_od_clk_voltage,
1455 amdgpu_set_pp_od_clk_voltage);
1456static DEVICE_ATTR(gpu_busy_percent, S_IRUGO,
1457 amdgpu_get_busy_percent, NULL);
1458static DEVICE_ATTR(mem_busy_percent, S_IRUGO,
1459 amdgpu_get_memory_busy_percent, NULL);
1460static DEVICE_ATTR(pcie_bw, S_IRUGO, amdgpu_get_pcie_bw, NULL);
1461static DEVICE_ATTR(ppfeatures, S_IRUGO | S_IWUSR,
1462 amdgpu_get_ppfeature_status,
1463 amdgpu_set_ppfeature_status);
1464static DEVICE_ATTR(unique_id, S_IRUGO, amdgpu_get_unique_id, NULL);
1465
1466static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
1467 struct device_attribute *attr,
1468 char *buf)
1469{
1470 struct amdgpu_device *adev = dev_get_drvdata(dev);
1471 struct drm_device *ddev = adev->ddev;
1472 int channel = to_sensor_dev_attr(attr)->index;
1473 int r, temp = 0, size = sizeof(temp);
1474
1475
1476 if ((adev->flags & AMD_IS_PX) &&
1477 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1478 return -EINVAL;
1479
1480 if (channel >= PP_TEMP_MAX)
1481 return -EINVAL;
1482
1483 switch (channel) {
1484 case PP_TEMP_JUNCTION:
1485
1486 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
1487 (void *)&temp, &size);
1488 if (r)
1489 return r;
1490 break;
1491 case PP_TEMP_EDGE:
1492
1493 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
1494 (void *)&temp, &size);
1495 if (r)
1496 return r;
1497 break;
1498 case PP_TEMP_MEM:
1499
1500 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
1501 (void *)&temp, &size);
1502 if (r)
1503 return r;
1504 break;
1505 }
1506
1507 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
1508}
1509
1510static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
1511 struct device_attribute *attr,
1512 char *buf)
1513{
1514 struct amdgpu_device *adev = dev_get_drvdata(dev);
1515 int hyst = to_sensor_dev_attr(attr)->index;
1516 int temp;
1517
1518 if (hyst)
1519 temp = adev->pm.dpm.thermal.min_temp;
1520 else
1521 temp = adev->pm.dpm.thermal.max_temp;
1522
1523 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
1524}
1525
1526static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
1527 struct device_attribute *attr,
1528 char *buf)
1529{
1530 struct amdgpu_device *adev = dev_get_drvdata(dev);
1531 int hyst = to_sensor_dev_attr(attr)->index;
1532 int temp;
1533
1534 if (hyst)
1535 temp = adev->pm.dpm.thermal.min_hotspot_temp;
1536 else
1537 temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
1538
1539 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
1540}
1541
1542static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
1543 struct device_attribute *attr,
1544 char *buf)
1545{
1546 struct amdgpu_device *adev = dev_get_drvdata(dev);
1547 int hyst = to_sensor_dev_attr(attr)->index;
1548 int temp;
1549
1550 if (hyst)
1551 temp = adev->pm.dpm.thermal.min_mem_temp;
1552 else
1553 temp = adev->pm.dpm.thermal.max_mem_crit_temp;
1554
1555 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
1556}
1557
1558static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
1559 struct device_attribute *attr,
1560 char *buf)
1561{
1562 int channel = to_sensor_dev_attr(attr)->index;
1563
1564 if (channel >= PP_TEMP_MAX)
1565 return -EINVAL;
1566
1567 return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label);
1568}
1569
1570static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
1571 struct device_attribute *attr,
1572 char *buf)
1573{
1574 struct amdgpu_device *adev = dev_get_drvdata(dev);
1575 int channel = to_sensor_dev_attr(attr)->index;
1576 int temp = 0;
1577
1578 if (channel >= PP_TEMP_MAX)
1579 return -EINVAL;
1580
1581 switch (channel) {
1582 case PP_TEMP_JUNCTION:
1583 temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
1584 break;
1585 case PP_TEMP_EDGE:
1586 temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
1587 break;
1588 case PP_TEMP_MEM:
1589 temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
1590 break;
1591 }
1592
1593 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
1594}
1595
1596static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
1597 struct device_attribute *attr,
1598 char *buf)
1599{
1600 struct amdgpu_device *adev = dev_get_drvdata(dev);
1601 u32 pwm_mode = 0;
1602 if (is_support_sw_smu(adev)) {
1603 pwm_mode = smu_get_fan_control_mode(&adev->smu);
1604 } else {
1605 if (!adev->powerplay.pp_funcs->get_fan_control_mode)
1606 return -EINVAL;
1607
1608 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
1609 }
1610
1611 return sprintf(buf, "%i\n", pwm_mode);
1612}
1613
1614static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
1615 struct device_attribute *attr,
1616 const char *buf,
1617 size_t count)
1618{
1619 struct amdgpu_device *adev = dev_get_drvdata(dev);
1620 int err;
1621 int value;
1622
1623
1624 if ((adev->flags & AMD_IS_PX) &&
1625 (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1626 return -EINVAL;
1627
1628 if (is_support_sw_smu(adev)) {
1629 err = kstrtoint(buf, 10, &value);
1630 if (err)
1631 return err;
1632
1633 smu_set_fan_control_mode(&adev->smu, value);
1634 } else {
1635 if (!adev->powerplay.pp_funcs->set_fan_control_mode)
1636 return -EINVAL;
1637
1638 err = kstrtoint(buf, 10, &value);
1639 if (err)
1640 return err;
1641
1642 amdgpu_dpm_set_fan_control_mode(adev, value);
1643 }
1644
1645 return count;
1646}
1647
1648static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
1649 struct device_attribute *attr,
1650 char *buf)
1651{
1652 return sprintf(buf, "%i\n", 0);
1653}
1654
1655static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
1656 struct device_attribute *attr,
1657 char *buf)
1658{
1659 return sprintf(buf, "%i\n", 255);
1660}
1661
1662static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
1663 struct device_attribute *attr,
1664 const char *buf, size_t count)
1665{
1666 struct amdgpu_device *adev = dev_get_drvdata(dev);
1667 int err;
1668 u32 value;
1669 u32 pwm_mode;
1670
1671
1672 if ((adev->flags & AMD_IS_PX) &&
1673 (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1674 return -EINVAL;
1675 if (is_support_sw_smu(adev))
1676 pwm_mode = smu_get_fan_control_mode(&adev->smu);
1677 else
1678 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
1679 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
1680 pr_info("manual fan speed control should be enabled first\n");
1681 return -EINVAL;
1682 }
1683
1684 err = kstrtou32(buf, 10, &value);
1685 if (err)
1686 return err;
1687
1688 value = (value * 100) / 255;
1689
1690 if (is_support_sw_smu(adev)) {
1691 err = smu_set_fan_speed_percent(&adev->smu, value);
1692 if (err)
1693 return err;
1694 } else if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
1695 err = amdgpu_dpm_set_fan_speed_percent(adev, value);
1696 if (err)
1697 return err;
1698 }
1699
1700 return count;
1701}
1702
1703static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
1704 struct device_attribute *attr,
1705 char *buf)
1706{
1707 struct amdgpu_device *adev = dev_get_drvdata(dev);
1708 int err;
1709 u32 speed = 0;
1710
1711
1712 if ((adev->flags & AMD_IS_PX) &&
1713 (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1714 return -EINVAL;
1715
1716 if (is_support_sw_smu(adev)) {
1717 err = smu_get_fan_speed_percent(&adev->smu, &speed);
1718 if (err)
1719 return err;
1720 } else if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
1721 err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
1722 if (err)
1723 return err;
1724 }
1725
1726 speed = (speed * 255) / 100;
1727
1728 return sprintf(buf, "%i\n", speed);
1729}
1730
1731static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
1732 struct device_attribute *attr,
1733 char *buf)
1734{
1735 struct amdgpu_device *adev = dev_get_drvdata(dev);
1736 int err;
1737 u32 speed = 0;
1738
1739
1740 if ((adev->flags & AMD_IS_PX) &&
1741 (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1742 return -EINVAL;
1743
1744 if (is_support_sw_smu(adev)) {
1745 err = smu_get_fan_speed_rpm(&adev->smu, &speed);
1746 if (err)
1747 return err;
1748 } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
1749 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
1750 if (err)
1751 return err;
1752 }
1753
1754 return sprintf(buf, "%i\n", speed);
1755}
1756
1757static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
1758 struct device_attribute *attr,
1759 char *buf)
1760{
1761 struct amdgpu_device *adev = dev_get_drvdata(dev);
1762 u32 min_rpm = 0;
1763 u32 size = sizeof(min_rpm);
1764 int r;
1765
1766 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
1767 (void *)&min_rpm, &size);
1768 if (r)
1769 return r;
1770
1771 return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm);
1772}
1773
1774static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
1775 struct device_attribute *attr,
1776 char *buf)
1777{
1778 struct amdgpu_device *adev = dev_get_drvdata(dev);
1779 u32 max_rpm = 0;
1780 u32 size = sizeof(max_rpm);
1781 int r;
1782
1783 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
1784 (void *)&max_rpm, &size);
1785 if (r)
1786 return r;
1787
1788 return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm);
1789}
1790
1791static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
1792 struct device_attribute *attr,
1793 char *buf)
1794{
1795 struct amdgpu_device *adev = dev_get_drvdata(dev);
1796 int err;
1797 u32 rpm = 0;
1798
1799
1800 if ((adev->flags & AMD_IS_PX) &&
1801 (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1802 return -EINVAL;
1803
1804 if (is_support_sw_smu(adev)) {
1805 err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
1806 if (err)
1807 return err;
1808 } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
1809 err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
1810 if (err)
1811 return err;
1812 }
1813
1814 return sprintf(buf, "%i\n", rpm);
1815}
1816
1817static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
1818 struct device_attribute *attr,
1819 const char *buf, size_t count)
1820{
1821 struct amdgpu_device *adev = dev_get_drvdata(dev);
1822 int err;
1823 u32 value;
1824 u32 pwm_mode;
1825
1826 if (is_support_sw_smu(adev))
1827 pwm_mode = smu_get_fan_control_mode(&adev->smu);
1828 else
1829 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
1830
1831 if (pwm_mode != AMD_FAN_CTRL_MANUAL)
1832 return -ENODATA;
1833
1834
1835 if ((adev->flags & AMD_IS_PX) &&
1836 (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1837 return -EINVAL;
1838
1839 err = kstrtou32(buf, 10, &value);
1840 if (err)
1841 return err;
1842
1843 if (is_support_sw_smu(adev)) {
1844 err = smu_set_fan_speed_rpm(&adev->smu, value);
1845 if (err)
1846 return err;
1847 } else if (adev->powerplay.pp_funcs->set_fan_speed_rpm) {
1848 err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
1849 if (err)
1850 return err;
1851 }
1852
1853 return count;
1854}
1855
1856static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
1857 struct device_attribute *attr,
1858 char *buf)
1859{
1860 struct amdgpu_device *adev = dev_get_drvdata(dev);
1861 u32 pwm_mode = 0;
1862
1863 if (is_support_sw_smu(adev)) {
1864 pwm_mode = smu_get_fan_control_mode(&adev->smu);
1865 } else {
1866 if (!adev->powerplay.pp_funcs->get_fan_control_mode)
1867 return -EINVAL;
1868
1869 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
1870 }
1871 return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
1872}
1873
1874static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
1875 struct device_attribute *attr,
1876 const char *buf,
1877 size_t count)
1878{
1879 struct amdgpu_device *adev = dev_get_drvdata(dev);
1880 int err;
1881 int value;
1882 u32 pwm_mode;
1883
1884
1885 if ((adev->flags & AMD_IS_PX) &&
1886 (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1887 return -EINVAL;
1888
1889
1890 err = kstrtoint(buf, 10, &value);
1891 if (err)
1892 return err;
1893
1894 if (value == 0)
1895 pwm_mode = AMD_FAN_CTRL_AUTO;
1896 else if (value == 1)
1897 pwm_mode = AMD_FAN_CTRL_MANUAL;
1898 else
1899 return -EINVAL;
1900
1901 if (is_support_sw_smu(adev)) {
1902 smu_set_fan_control_mode(&adev->smu, pwm_mode);
1903 } else {
1904 if (!adev->powerplay.pp_funcs->set_fan_control_mode)
1905 return -EINVAL;
1906 amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
1907 }
1908
1909 return count;
1910}
1911
1912static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
1913 struct device_attribute *attr,
1914 char *buf)
1915{
1916 struct amdgpu_device *adev = dev_get_drvdata(dev);
1917 struct drm_device *ddev = adev->ddev;
1918 u32 vddgfx;
1919 int r, size = sizeof(vddgfx);
1920
1921
1922 if ((adev->flags & AMD_IS_PX) &&
1923 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1924 return -EINVAL;
1925
1926
1927 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
1928 (void *)&vddgfx, &size);
1929 if (r)
1930 return r;
1931
1932 return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx);
1933}
1934
1935static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
1936 struct device_attribute *attr,
1937 char *buf)
1938{
1939 return snprintf(buf, PAGE_SIZE, "vddgfx\n");
1940}
1941
1942static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
1943 struct device_attribute *attr,
1944 char *buf)
1945{
1946 struct amdgpu_device *adev = dev_get_drvdata(dev);
1947 struct drm_device *ddev = adev->ddev;
1948 u32 vddnb;
1949 int r, size = sizeof(vddnb);
1950
1951
1952 if (!(adev->flags & AMD_IS_APU))
1953 return -EINVAL;
1954
1955
1956 if ((adev->flags & AMD_IS_PX) &&
1957 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1958 return -EINVAL;
1959
1960
1961 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
1962 (void *)&vddnb, &size);
1963 if (r)
1964 return r;
1965
1966 return snprintf(buf, PAGE_SIZE, "%d\n", vddnb);
1967}
1968
1969static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
1970 struct device_attribute *attr,
1971 char *buf)
1972{
1973 return snprintf(buf, PAGE_SIZE, "vddnb\n");
1974}
1975
1976static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
1977 struct device_attribute *attr,
1978 char *buf)
1979{
1980 struct amdgpu_device *adev = dev_get_drvdata(dev);
1981 struct drm_device *ddev = adev->ddev;
1982 u32 query = 0;
1983 int r, size = sizeof(u32);
1984 unsigned uw;
1985
1986
1987 if ((adev->flags & AMD_IS_PX) &&
1988 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1989 return -EINVAL;
1990
1991
1992 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
1993 (void *)&query, &size);
1994 if (r)
1995 return r;
1996
1997
1998 uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
1999
2000 return snprintf(buf, PAGE_SIZE, "%u\n", uw);
2001}
2002
2003static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
2004 struct device_attribute *attr,
2005 char *buf)
2006{
2007 return sprintf(buf, "%i\n", 0);
2008}
2009
2010static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
2011 struct device_attribute *attr,
2012 char *buf)
2013{
2014 struct amdgpu_device *adev = dev_get_drvdata(dev);
2015 uint32_t limit = 0;
2016
2017 if (is_support_sw_smu(adev)) {
2018 smu_get_power_limit(&adev->smu, &limit, true);
2019 return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2020 } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
2021 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
2022 return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2023 } else {
2024 return snprintf(buf, PAGE_SIZE, "\n");
2025 }
2026}
2027
2028static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
2029 struct device_attribute *attr,
2030 char *buf)
2031{
2032 struct amdgpu_device *adev = dev_get_drvdata(dev);
2033 uint32_t limit = 0;
2034
2035 if (is_support_sw_smu(adev)) {
2036 smu_get_power_limit(&adev->smu, &limit, false);
2037 return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2038 } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
2039 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
2040 return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2041 } else {
2042 return snprintf(buf, PAGE_SIZE, "\n");
2043 }
2044}
2045
2046
2047static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
2048 struct device_attribute *attr,
2049 const char *buf,
2050 size_t count)
2051{
2052 struct amdgpu_device *adev = dev_get_drvdata(dev);
2053 int err;
2054 u32 value;
2055
2056 err = kstrtou32(buf, 10, &value);
2057 if (err)
2058 return err;
2059
2060 value = value / 1000000;
2061 if (is_support_sw_smu(adev)) {
2062 adev->smu.funcs->set_power_limit(&adev->smu, value);
2063 } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) {
2064 err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
2065 if (err)
2066 return err;
2067 } else {
2068 return -EINVAL;
2069 }
2070
2071 return count;
2072}
2073
2074static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
2075 struct device_attribute *attr,
2076 char *buf)
2077{
2078 struct amdgpu_device *adev = dev_get_drvdata(dev);
2079 struct drm_device *ddev = adev->ddev;
2080 uint32_t sclk;
2081 int r, size = sizeof(sclk);
2082
2083
2084 if ((adev->flags & AMD_IS_PX) &&
2085 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
2086 return -EINVAL;
2087
2088
2089 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
2090 (void *)&sclk, &size);
2091 if (r)
2092 return r;
2093
2094 return snprintf(buf, PAGE_SIZE, "%d\n", sclk * 10 * 1000);
2095}
2096
2097static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
2098 struct device_attribute *attr,
2099 char *buf)
2100{
2101 return snprintf(buf, PAGE_SIZE, "sclk\n");
2102}
2103
2104static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
2105 struct device_attribute *attr,
2106 char *buf)
2107{
2108 struct amdgpu_device *adev = dev_get_drvdata(dev);
2109 struct drm_device *ddev = adev->ddev;
2110 uint32_t mclk;
2111 int r, size = sizeof(mclk);
2112
2113
2114 if ((adev->flags & AMD_IS_PX) &&
2115 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
2116 return -EINVAL;
2117
2118
2119 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
2120 (void *)&mclk, &size);
2121 if (r)
2122 return r;
2123
2124 return snprintf(buf, PAGE_SIZE, "%d\n", mclk * 10 * 1000);
2125}
2126
2127static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
2128 struct device_attribute *attr,
2129 char *buf)
2130{
2131 return snprintf(buf, PAGE_SIZE, "mclk\n");
2132}
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
2217static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
2218static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
2219static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
2220static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
2221static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
2222static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
2223static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
2224static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
2225static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
2226static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
2227static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
2228static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
2229static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
2230static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
2231static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
2232static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
2233static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
2234static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
2235static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
2236static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
2237static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
2238static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
2239static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
2240static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
2241static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
2242static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
2243static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
2244static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
2245static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
2246static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
2247static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
2248static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
2249static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
2250static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
2251static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
2252
2253static struct attribute *hwmon_attributes[] = {
2254 &sensor_dev_attr_temp1_input.dev_attr.attr,
2255 &sensor_dev_attr_temp1_crit.dev_attr.attr,
2256 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
2257 &sensor_dev_attr_temp2_input.dev_attr.attr,
2258 &sensor_dev_attr_temp2_crit.dev_attr.attr,
2259 &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
2260 &sensor_dev_attr_temp3_input.dev_attr.attr,
2261 &sensor_dev_attr_temp3_crit.dev_attr.attr,
2262 &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
2263 &sensor_dev_attr_temp1_emergency.dev_attr.attr,
2264 &sensor_dev_attr_temp2_emergency.dev_attr.attr,
2265 &sensor_dev_attr_temp3_emergency.dev_attr.attr,
2266 &sensor_dev_attr_temp1_label.dev_attr.attr,
2267 &sensor_dev_attr_temp2_label.dev_attr.attr,
2268 &sensor_dev_attr_temp3_label.dev_attr.attr,
2269 &sensor_dev_attr_pwm1.dev_attr.attr,
2270 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
2271 &sensor_dev_attr_pwm1_min.dev_attr.attr,
2272 &sensor_dev_attr_pwm1_max.dev_attr.attr,
2273 &sensor_dev_attr_fan1_input.dev_attr.attr,
2274 &sensor_dev_attr_fan1_min.dev_attr.attr,
2275 &sensor_dev_attr_fan1_max.dev_attr.attr,
2276 &sensor_dev_attr_fan1_target.dev_attr.attr,
2277 &sensor_dev_attr_fan1_enable.dev_attr.attr,
2278 &sensor_dev_attr_in0_input.dev_attr.attr,
2279 &sensor_dev_attr_in0_label.dev_attr.attr,
2280 &sensor_dev_attr_in1_input.dev_attr.attr,
2281 &sensor_dev_attr_in1_label.dev_attr.attr,
2282 &sensor_dev_attr_power1_average.dev_attr.attr,
2283 &sensor_dev_attr_power1_cap_max.dev_attr.attr,
2284 &sensor_dev_attr_power1_cap_min.dev_attr.attr,
2285 &sensor_dev_attr_power1_cap.dev_attr.attr,
2286 &sensor_dev_attr_freq1_input.dev_attr.attr,
2287 &sensor_dev_attr_freq1_label.dev_attr.attr,
2288 &sensor_dev_attr_freq2_input.dev_attr.attr,
2289 &sensor_dev_attr_freq2_label.dev_attr.attr,
2290 NULL
2291};
2292
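/*
 * Decide which hwmon attributes are visible for this device: fan and
 * power attributes are hidden on APUs or when the powerplay backend
 * provides no matching callbacks, and the junction/memory temperature
 * channels are limited to Vega10 and newer dGPUs.
 */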
2293static umode_t hwmon_attributes_visible(struct kobject *kobj,
2294 struct attribute *attr, int index)
2295{
2296 struct device *dev = kobj_to_dev(kobj);
2297 struct amdgpu_device *adev = dev_get_drvdata(dev);
2298 umode_t effective_mode = attr->mode;
2299
/* Skip fan attributes if fan is not present */
2301 if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
2302 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
2303 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
2304 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
2305 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
2306 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
2307 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
2308 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
2309 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
2310 return 0;
2311
/* Skip fan attributes on APUs */
2313 if ((adev->flags & AMD_IS_APU) &&
2314 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
2315 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
2316 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
2317 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
2318 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
2319 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
2320 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
2321 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
2322 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
2323 return 0;
2324
/* Skip limit and fan attributes if DPM is not enabled */
2326 if (!adev->pm.dpm_enabled &&
2327 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
2328 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
2329 attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
2330 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
2331 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
2332 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
2333 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
2334 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
2335 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
2336 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
2337 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
2338 return 0;
2339
2340 if (!is_support_sw_smu(adev)) {
/* mask fan attributes if we have no bindings for this asic to expose */
2342 if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
2343 attr == &sensor_dev_attr_pwm1.dev_attr.attr) ||
2344 (!adev->powerplay.pp_funcs->get_fan_control_mode &&
2345 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr))
2346 effective_mode &= ~S_IRUGO;
2347
2348 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
2349 attr == &sensor_dev_attr_pwm1.dev_attr.attr) ||
2350 (!adev->powerplay.pp_funcs->set_fan_control_mode &&
2351 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr))
2352 effective_mode &= ~S_IWUSR;
2353 }
2354
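/* power sensors and the power cap are not exposed on APUs */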
2355 if ((adev->flags & AMD_IS_APU) &&
2356 (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
2357 attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
2358 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
2359 attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
2360 return 0;
2361
2362 if (!is_support_sw_smu(adev)) {
/* hide max/min values if we can't both query and manage the fan */
2364 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
2365 !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
2366 (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
2367 !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
2368 (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
2369 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
2370 return 0;
2371
2372 if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
2373 !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
2374 (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
2375 attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
2376 return 0;
2377 }
2378
/* only APUs have vddnb */
2380 if (!(adev->flags & AMD_IS_APU) &&
2381 (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
2382 attr == &sensor_dev_attr_in1_label.dev_attr.attr))
2383 return 0;
2384
/* no mclk on APUs */
2386 if ((adev->flags & AMD_IS_APU) &&
2387 (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
2388 attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
2389 return 0;
2390
/* only SOC15 dGPUs support hotspot and mem temperatures */
2392 if (((adev->flags & AMD_IS_APU) ||
2393 adev->asic_type < CHIP_VEGA10) &&
2394 (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
2395 attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
2396 attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
2397 attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
2398 attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
2399 attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
2400 attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr ||
2401 attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
2402 attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
2403 attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
2404 attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
2405 return 0;
2406
2407 return effective_mode;
2408}
2409
2410static const struct attribute_group hwmon_attrgroup = {
2411 .attrs = hwmon_attributes,
2412 .is_visible = hwmon_attributes_visible,
2413};
2414
2415static const struct attribute_group *hwmon_groups[] = {
2416 &hwmon_attrgroup,
2417 NULL
2418};
2419
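/*
 * Deferred thermal interrupt handler: re-read the GPU temperature and
 * switch between the internal thermal state and the user-selected state,
 * then recompute the clocks.
 */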
2420void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
2421{
2422 struct amdgpu_device *adev =
2423 container_of(work, struct amdgpu_device,
2424 pm.dpm.thermal.work);
/* switch to the thermal state */
2426 enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
2427 int temp, size = sizeof(temp);
2428
2429 if (!adev->pm.dpm_enabled)
2430 return;
2431
2432 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
2433 (void *)&temp, &size)) {
2434 if (temp < adev->pm.dpm.thermal.min_temp)
/* switch back the user state */
2436 dpm_state = adev->pm.dpm.user_state;
2437 } else {
2438 if (adev->pm.dpm.thermal.high_to_low)
/* switch back the user state */
2440 dpm_state = adev->pm.dpm.user_state;
2441 }
2442 mutex_lock(&adev->pm.mutex);
2443 if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
2444 adev->pm.dpm.thermal_active = true;
2445 else
2446 adev->pm.dpm.thermal_active = false;
2447 adev->pm.dpm.state = dpm_state;
2448 mutex_unlock(&adev->pm.mutex);
2449
2450 amdgpu_pm_compute_clocks(adev);
2451}
2452
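/*
 * Walk the power state table and return the best match for the requested
 * dpm_state, preferring single-display-only states when at most one CRTC
 * is active; falls back through related states when no exact match exists.
 */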
2453static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
2454 enum amd_pm_state_type dpm_state)
2455{
2456 int i;
2457 struct amdgpu_ps *ps;
2458 u32 ui_class;
bool single_display = (adev->pm.dpm.new_active_crtc_count < 2);
2461
/* check if the vblank period is too short to adjust the mclk */
2463 if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
2464 if (amdgpu_dpm_vblank_too_short(adev))
2465 single_display = false;
2466 }
2467
/* certain older asics have a separate 3D performance state,
 * so try that first if the user selected performance
 */
2471 if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
2472 dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
/* balanced states don't exist at the moment */
2474 if (dpm_state == POWER_STATE_TYPE_BALANCED)
2475 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
2476
2477restart_search:
/* pick the best matching power state for the current conditions */
2479 for (i = 0; i < adev->pm.dpm.num_ps; i++) {
2480 ps = &adev->pm.dpm.ps[i];
2481 ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
2482 switch (dpm_state) {
/* user states */
2484 case POWER_STATE_TYPE_BATTERY:
2485 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
2486 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
2487 if (single_display)
2488 return ps;
2489 } else
2490 return ps;
2491 }
2492 break;
2493 case POWER_STATE_TYPE_BALANCED:
2494 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
2495 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
2496 if (single_display)
2497 return ps;
2498 } else
2499 return ps;
2500 }
2501 break;
2502 case POWER_STATE_TYPE_PERFORMANCE:
2503 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
2504 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
2505 if (single_display)
2506 return ps;
2507 } else
2508 return ps;
2509 }
2510 break;
/* internal states */
2512 case POWER_STATE_TYPE_INTERNAL_UVD:
2513 if (adev->pm.dpm.uvd_ps)
2514 return adev->pm.dpm.uvd_ps;
2515 else
2516 break;
2517 case POWER_STATE_TYPE_INTERNAL_UVD_SD:
2518 if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
2519 return ps;
2520 break;
2521 case POWER_STATE_TYPE_INTERNAL_UVD_HD:
2522 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
2523 return ps;
2524 break;
2525 case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
2526 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
2527 return ps;
2528 break;
2529 case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
2530 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
2531 return ps;
2532 break;
2533 case POWER_STATE_TYPE_INTERNAL_BOOT:
2534 return adev->pm.dpm.boot_ps;
2535 case POWER_STATE_TYPE_INTERNAL_THERMAL:
2536 if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
2537 return ps;
2538 break;
2539 case POWER_STATE_TYPE_INTERNAL_ACPI:
2540 if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
2541 return ps;
2542 break;
2543 case POWER_STATE_TYPE_INTERNAL_ULV:
2544 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
2545 return ps;
2546 break;
2547 case POWER_STATE_TYPE_INTERNAL_3DPERF:
2548 if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
2549 return ps;
2550 break;
2551 default:
2552 break;
2553 }
2554 }
/* use a fallback state if we didn't match */
2556 switch (dpm_state) {
2557 case POWER_STATE_TYPE_INTERNAL_UVD_SD:
2558 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
2559 goto restart_search;
2560 case POWER_STATE_TYPE_INTERNAL_UVD_HD:
2561 case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
2562 case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
2563 if (adev->pm.dpm.uvd_ps) {
2564 return adev->pm.dpm.uvd_ps;
2565 } else {
2566 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
2567 goto restart_search;
2568 }
2569 case POWER_STATE_TYPE_INTERNAL_THERMAL:
2570 dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
2571 goto restart_search;
2572 case POWER_STATE_TYPE_INTERNAL_ACPI:
2573 dpm_state = POWER_STATE_TYPE_BATTERY;
2574 goto restart_search;
2575 case POWER_STATE_TYPE_BATTERY:
2576 case POWER_STATE_TYPE_BALANCED:
2577 case POWER_STATE_TYPE_INTERNAL_3DPERF:
2578 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
2579 goto restart_search;
2580 default:
2581 break;
2582 }
2583
2584 return NULL;
2585}
2586
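/*
 * Select the next power state and hand it to the hardware; callers must
 * hold adev->pm.mutex.
 */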
2587static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
2588{
2589 struct amdgpu_ps *ps;
2590 enum amd_pm_state_type dpm_state;
2591 int ret;
2592 bool equal = false;
2593
/* if dpm init failed */
2595 if (!adev->pm.dpm_enabled)
2596 return;
2597
2598 if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
/* add other state override checks here */
2600 if ((!adev->pm.dpm.thermal_active) &&
2601 (!adev->pm.dpm.uvd_active))
2602 adev->pm.dpm.state = adev->pm.dpm.user_state;
2603 }
2604 dpm_state = adev->pm.dpm.state;
2605
2606 ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
2607 if (ps)
2608 adev->pm.dpm.requested_ps = ps;
2609 else
2610 return;
2611
2612 if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
2613 printk("switching from power state:\n");
2614 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
2615 printk("switching to power state:\n");
2616 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
2617 }
2618
/* update whether vce is active */
2620 ps->vce_active = adev->pm.dpm.vce_active;
2621 if (adev->powerplay.pp_funcs->display_configuration_changed)
2622 amdgpu_dpm_display_configuration_changed(adev);
2623
2624 ret = amdgpu_dpm_pre_set_power_state(adev);
2625 if (ret)
2626 return;
2627
2628 if (adev->powerplay.pp_funcs->check_state_equal) {
2629 if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
2630 equal = false;
2631 }
2632
2633 if (equal)
2634 return;
2635
2636 amdgpu_dpm_set_power_state(adev);
2637 amdgpu_dpm_post_set_power_state(adev);
2638
2639 adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
2640 adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
2641
2642 if (adev->powerplay.pp_funcs->force_performance_level) {
2643 if (adev->pm.dpm.thermal_active) {
2644 enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
/* force low perf level for thermal */
2646 amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
/* save the user's level */
2648 adev->pm.dpm.forced_level = level;
2649 } else {
/* otherwise, use the user selected level */
2651 amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
2652 }
2653 }
2654}
2655
2656void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
2657{
2658 int ret = 0;
2659 if (is_support_sw_smu(adev)) {
2660 ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_UVD, enable);
2661 if (ret)
DRM_ERROR("[SW SMU]: dpm enable uvd failed, state = %s, ret = %d\n",
2663 enable ? "true" : "false", ret);
2664 } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
/* enable/disable UVD */
2666 mutex_lock(&adev->pm.mutex);
2667 amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
2668 mutex_unlock(&adev->pm.mutex);
2669 }
/* enable/disable Low Memory PState for UVD (4k videos) */
2671 if (adev->asic_type == CHIP_STONEY &&
2672 adev->uvd.decode_image_width >= WIDTH_4K) {
2673 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
2674
2675 if (hwmgr && hwmgr->hwmgr_func &&
2676 hwmgr->hwmgr_func->update_nbdpm_pstate)
2677 hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
2678 !enable,
2679 true);
2680 }
2681}
2682
2683void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
2684{
2685 int ret = 0;
2686 if (is_support_sw_smu(adev)) {
2687 ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_VCE, enable);
2688 if (ret)
DRM_ERROR("[SW SMU]: dpm enable vce failed, state = %s, ret = %d\n",
2690 enable ? "true" : "false", ret);
2691 } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
/* enable/disable VCE */
2693 mutex_lock(&adev->pm.mutex);
2694 amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
2695 mutex_unlock(&adev->pm.mutex);
2696 }
2697}
2698
2699void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
2700{
2701 int i;
2702
2703 if (adev->powerplay.pp_funcs->print_power_state == NULL)
2704 return;
2705
2706 for (i = 0; i < adev->pm.dpm.num_ps; i++)
2707 amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
2708
2709}
2710
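/*
 * Minimal sysfs interface exposed to SR-IOV virtual functions when the
 * host (GIM) enables hardware performance monitoring.
 */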
2711int amdgpu_pm_virt_sysfs_init(struct amdgpu_device *adev)
2712{
2713 int ret = 0;
2714
2715 if (!(amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev)))
2716 return ret;
2717
2718 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
2719 if (ret) {
2720 DRM_ERROR("failed to create device file pp_dpm_sclk\n");
2721 return ret;
2722 }
2723
2724 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
2725 if (ret) {
2726 DRM_ERROR("failed to create device file pp_dpm_mclk\n");
2727 return ret;
2728 }
2729
2730 ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
2731 if (ret) {
DRM_ERROR("failed to create device file power_dpm_force_performance_level\n");
2733 return ret;
2734 }
2735
2736 return ret;
2737}
2738
2739void amdgpu_pm_virt_sysfs_fini(struct amdgpu_device *adev)
2740{
2741 if (!(amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev)))
2742 return;
2743
2744 device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
2745 device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
2746 device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
2747}
2748
2749int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
2750{
2751 int r;
2752
2753 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
2754 r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
2755 if (r) {
2756 pr_err("smu firmware loading failed\n");
2757 return r;
2758 }
2759 *smu_version = adev->pm.fw_version;
2760 }
2761 return 0;
2762}
2763
2764int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
2765{
2766 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
2767 int ret;
2768
2769 if (adev->pm.sysfs_initialized)
2770 return 0;
2771
2772 if (adev->pm.dpm_enabled == 0)
2773 return 0;
2774
2775 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
2776 DRIVER_NAME, adev,
2777 hwmon_groups);
2778 if (IS_ERR(adev->pm.int_hwmon_dev)) {
2779 ret = PTR_ERR(adev->pm.int_hwmon_dev);
2780 dev_err(adev->dev,
2781 "Unable to register hwmon device: %d\n", ret);
2782 return ret;
2783 }
2784
2785 ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
2786 if (ret) {
2787 DRM_ERROR("failed to create device file for dpm state\n");
2788 return ret;
2789 }
2790 ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
2791 if (ret) {
DRM_ERROR("failed to create device file power_dpm_force_performance_level\n");
2793 return ret;
2794 }
2795
2796
2797 ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
2798 if (ret) {
2799 DRM_ERROR("failed to create device file pp_num_states\n");
2800 return ret;
2801 }
2802 ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
2803 if (ret) {
2804 DRM_ERROR("failed to create device file pp_cur_state\n");
2805 return ret;
2806 }
2807 ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
2808 if (ret) {
2809 DRM_ERROR("failed to create device file pp_force_state\n");
2810 return ret;
2811 }
2812 ret = device_create_file(adev->dev, &dev_attr_pp_table);
2813 if (ret) {
2814 DRM_ERROR("failed to create device file pp_table\n");
2815 return ret;
2816 }
2817
2818 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
2819 if (ret) {
2820 DRM_ERROR("failed to create device file pp_dpm_sclk\n");
2821 return ret;
2822 }
2823 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
2824 if (ret) {
2825 DRM_ERROR("failed to create device file pp_dpm_mclk\n");
2826 return ret;
2827 }
2828 if (adev->asic_type >= CHIP_VEGA10) {
2829 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_socclk);
2830 if (ret) {
2831 DRM_ERROR("failed to create device file pp_dpm_socclk\n");
2832 return ret;
2833 }
2834 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
2835 if (ret) {
2836 DRM_ERROR("failed to create device file pp_dpm_dcefclk\n");
2837 return ret;
2838 }
2839 }
2840 if (adev->asic_type >= CHIP_VEGA20) {
2841 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_fclk);
2842 if (ret) {
2843 DRM_ERROR("failed to create device file pp_dpm_fclk\n");
2844 return ret;
2845 }
2846 }
2847 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
2848 if (ret) {
2849 DRM_ERROR("failed to create device file pp_dpm_pcie\n");
2850 return ret;
2851 }
2852 ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
2853 if (ret) {
2854 DRM_ERROR("failed to create device file pp_sclk_od\n");
2855 return ret;
2856 }
2857 ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od);
2858 if (ret) {
2859 DRM_ERROR("failed to create device file pp_mclk_od\n");
2860 return ret;
2861 }
2862 ret = device_create_file(adev->dev,
2863 &dev_attr_pp_power_profile_mode);
2864 if (ret) {
2865 DRM_ERROR("failed to create device file "
2866 "pp_power_profile_mode\n");
2867 return ret;
2868 }
2869 if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
2870 (!is_support_sw_smu(adev) && hwmgr->od_enabled)) {
2871 ret = device_create_file(adev->dev,
2872 &dev_attr_pp_od_clk_voltage);
2873 if (ret) {
2874 DRM_ERROR("failed to create device file "
2875 "pp_od_clk_voltage\n");
2876 return ret;
2877 }
2878 }
2879 ret = device_create_file(adev->dev,
2880 &dev_attr_gpu_busy_percent);
2881 if (ret) {
2882 DRM_ERROR("failed to create device file "
2883 "gpu_busy_level\n");
2884 return ret;
2885 }
2886
2887 if (!(adev->flags & AMD_IS_APU) &&
2888 (adev->asic_type != CHIP_VEGA10)) {
2889 ret = device_create_file(adev->dev,
2890 &dev_attr_mem_busy_percent);
2891 if (ret) {
2892 DRM_ERROR("failed to create device file "
2893 "mem_busy_percent\n");
2894 return ret;
2895 }
2896 }
2897
2898 if (!(adev->flags & AMD_IS_APU)) {
2899 ret = device_create_file(adev->dev, &dev_attr_pcie_bw);
2900 if (ret) {
2901 DRM_ERROR("failed to create device file pcie_bw\n");
2902 return ret;
2903 }
2904 }
if (adev->unique_id) {
	ret = device_create_file(adev->dev, &dev_attr_unique_id);
	if (ret) {
		DRM_ERROR("failed to create device file unique_id\n");
		return ret;
	}
}
2911 ret = amdgpu_debugfs_pm_init(adev);
2912 if (ret) {
2913 DRM_ERROR("Failed to register debugfs file for dpm!\n");
2914 return ret;
2915 }
2916
2917 if ((adev->asic_type >= CHIP_VEGA10) &&
2918 !(adev->flags & AMD_IS_APU)) {
2919 ret = device_create_file(adev->dev,
2920 &dev_attr_ppfeatures);
2921 if (ret) {
2922 DRM_ERROR("failed to create device file "
2923 "ppfeatures\n");
2924 return ret;
2925 }
2926 }
2927
2928 adev->pm.sysfs_initialized = true;
2929
2930 return 0;
2931}
2932
2933void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
2934{
2935 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
2936
2937 if (adev->pm.dpm_enabled == 0)
2938 return;
2939
2940 if (adev->pm.int_hwmon_dev)
2941 hwmon_device_unregister(adev->pm.int_hwmon_dev);
2942 device_remove_file(adev->dev, &dev_attr_power_dpm_state);
2943 device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
2944
2945 device_remove_file(adev->dev, &dev_attr_pp_num_states);
2946 device_remove_file(adev->dev, &dev_attr_pp_cur_state);
2947 device_remove_file(adev->dev, &dev_attr_pp_force_state);
2948 device_remove_file(adev->dev, &dev_attr_pp_table);
2949
2950 device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
2951 device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
2952 if (adev->asic_type >= CHIP_VEGA10) {
2953 device_remove_file(adev->dev, &dev_attr_pp_dpm_socclk);
2954 device_remove_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
2955 }
2956 device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
2957 if (adev->asic_type >= CHIP_VEGA20)
2958 device_remove_file(adev->dev, &dev_attr_pp_dpm_fclk);
2959 device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
2960 device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
2961 device_remove_file(adev->dev,
2962 &dev_attr_pp_power_profile_mode);
2963 if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
2964 (!is_support_sw_smu(adev) && hwmgr->od_enabled))
2965 device_remove_file(adev->dev,
2966 &dev_attr_pp_od_clk_voltage);
2967 device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
2968 if (!(adev->flags & AMD_IS_APU) &&
2969 (adev->asic_type != CHIP_VEGA10))
2970 device_remove_file(adev->dev, &dev_attr_mem_busy_percent);
2971 if (!(adev->flags & AMD_IS_APU))
2972 device_remove_file(adev->dev, &dev_attr_pcie_bw);
2973 if (adev->unique_id)
2974 device_remove_file(adev->dev, &dev_attr_unique_id);
2975 if ((adev->asic_type >= CHIP_VEGA10) &&
2976 !(adev->flags & AMD_IS_APU))
2977 device_remove_file(adev->dev, &dev_attr_ppfeatures);
2978}
2979
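/*
 * Re-evaluate clocks for the current display configuration: update the
 * display bandwidth, wait for the rings to go idle, then let the
 * SMU/powerplay stack (or the legacy DPM path) pick new clock levels.
 */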
2980void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
2981{
2982 int i = 0;
2983
2984 if (!adev->pm.dpm_enabled)
2985 return;
2986
2987 if (adev->mode_info.num_crtc)
2988 amdgpu_display_bandwidth_update(adev);
2989
2990 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
2991 struct amdgpu_ring *ring = adev->rings[i];
2992 if (ring && ring->sched.ready)
2993 amdgpu_fence_wait_empty(ring);
2994 }
2995
2996 if (is_support_sw_smu(adev)) {
2997 struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
2998 smu_handle_task(&adev->smu,
2999 smu_dpm->dpm_level,
3000 AMD_PP_TASK_DISPLAY_CONFIG_CHANGE);
3001 } else {
3002 if (adev->powerplay.pp_funcs->dispatch_tasks) {
3003 if (!amdgpu_device_has_dc_support(adev)) {
3004 mutex_lock(&adev->pm.mutex);
3005 amdgpu_dpm_get_active_displays(adev);
3006 adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
3007 adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
3008 adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
/* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code */
3010 if (adev->pm.pm_display_cfg.vrefresh > 120)
3011 adev->pm.pm_display_cfg.min_vblank_time = 0;
3012 if (adev->powerplay.pp_funcs->display_configuration_change)
3013 adev->powerplay.pp_funcs->display_configuration_change(
3014 adev->powerplay.pp_handle,
3015 &adev->pm.pm_display_cfg);
3016 mutex_unlock(&adev->pm.mutex);
3017 }
3018 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
3019 } else {
3020 mutex_lock(&adev->pm.mutex);
3021 amdgpu_dpm_get_active_displays(adev);
3022 amdgpu_dpm_change_power_state_locked(adev);
3023 mutex_unlock(&adev->pm.mutex);
3024 }
3025 }
3026}
3027
/*
 * Debugfs info
 */
3031#if defined(CONFIG_DEBUG_FS)
3032
3033static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
3034{
3035 uint32_t value;
3036 uint64_t value64;
3037 uint32_t query = 0;
3038 int size;
3039
/* GPU clocks and power */
3041 size = sizeof(value);
3042 seq_printf(m, "GFX Clocks and Power:\n");
3043 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
3044 seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
3045 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
3046 seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
3047 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
3048 seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
3049 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
3050 seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
3051 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
3052 seq_printf(m, "\t%u mV (VDDGFX)\n", value);
3053 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
3054 seq_printf(m, "\t%u mV (VDDNB)\n", value);
3055 size = sizeof(uint32_t);
3056 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
3057 seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
3058 size = sizeof(value);
3059 seq_printf(m, "\n");
3060
/* GPU temperature */
3062 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
3063 seq_printf(m, "GPU Temperature: %u C\n", value/1000);
3064
/* GPU load */
3066 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
3067 seq_printf(m, "GPU Load: %u %%\n", value);
/* memory load */
3069 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
3070 seq_printf(m, "MEM Load: %u %%\n", value);
3071
3072 seq_printf(m, "\n");
3073
/* SMC feature mask */
3075 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
3076 seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
3077
3078 if (adev->asic_type > CHIP_VEGA20) {
/* VCN clocks (Navi and newer) */
3080 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
3081 if (!value) {
3082 seq_printf(m, "VCN: Disabled\n");
3083 } else {
3084 seq_printf(m, "VCN: Enabled\n");
3085 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3086 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3087 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3088 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3089 }
3090 }
3091 seq_printf(m, "\n");
3092 } else {
/* UVD clocks */
3094 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
3095 if (!value) {
3096 seq_printf(m, "UVD: Disabled\n");
3097 } else {
3098 seq_printf(m, "UVD: Enabled\n");
3099 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3100 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3101 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3102 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3103 }
3104 }
3105 seq_printf(m, "\n");
3106
/* VCE clocks */
3108 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
3109 if (!value) {
3110 seq_printf(m, "VCE: Disabled\n");
3111 } else {
3112 seq_printf(m, "VCE: Enabled\n");
3113 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
3114 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
3115 }
3116 }
3117 }
3118
3119 return 0;
3120}
3121
3122static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
3123{
3124 int i;
3125
3126 for (i = 0; clocks[i].flag; i++)
3127 seq_printf(m, "\t%s: %s\n", clocks[i].name,
3128 (flags & clocks[i].flag) ? "On" : "Off");
3129}
3130
3131static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
3132{
3133 struct drm_info_node *node = (struct drm_info_node *) m->private;
3134 struct drm_device *dev = node->minor->dev;
3135 struct amdgpu_device *adev = dev->dev_private;
3136 struct drm_device *ddev = adev->ddev;
3137 u32 flags = 0;
3138
3139 amdgpu_device_ip_get_clockgating_state(adev, &flags);
3140 seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
3141 amdgpu_parse_cg_state(m, flags);
3142 seq_printf(m, "\n");
3143
3144 if (!adev->pm.dpm_enabled) {
3145 seq_printf(m, "dpm not enabled\n");
3146 return 0;
3147 }
3148 if ((adev->flags & AMD_IS_PX) &&
3149 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
3150 seq_printf(m, "PX asic powered off\n");
3151 } else if (!is_support_sw_smu(adev) && adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
3152 mutex_lock(&adev->pm.mutex);
3153 if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
3154 adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
3155 else
3156 seq_printf(m, "Debugfs support not implemented for this asic\n");
3157 mutex_unlock(&adev->pm.mutex);
3158 } else {
3159 return amdgpu_debugfs_pm_info_pp(m, adev);
3160 }
3161
3162 return 0;
3163}
3164
3165static const struct drm_info_list amdgpu_pm_info_list[] = {
3166 {"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
3167};
3168#endif
3169
3170static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
3171{
3172#if defined(CONFIG_DEBUG_FS)
3173 return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
3174#else
3175 return 0;
3176#endif
3177}
3178