1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <drm/drmP.h>
26#include "amdgpu.h"
27#include "amdgpu_drv.h"
28#include "amdgpu_pm.h"
29#include "amdgpu_dpm.h"
30#include "atom.h"
31#include <linux/power_supply.h>
32#include <linux/hwmon.h>
33#include <linux/hwmon-sysfs.h>
34#include <linux/nospec.h>
35
36static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
37
/* Human-readable names for the AMD_CG_SUPPORT_* clock-gating feature flags.
 * The table is zero/NULL terminated; consumers walk it to print which
 * clock-gating features a given chip supports. */
static const struct cg_flag_name clocks[] = {
	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
	{AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
	{0, NULL},
};
65
66void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
67{
68 if (adev->pm.dpm_enabled) {
69 mutex_lock(&adev->pm.mutex);
70 if (power_supply_is_system_supplied() > 0)
71 adev->pm.ac_power = true;
72 else
73 adev->pm.ac_power = false;
74 if (adev->powerplay.pp_funcs->enable_bapm)
75 amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
76 mutex_unlock(&adev->pm.mutex);
77 }
78}
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114static ssize_t amdgpu_get_dpm_state(struct device *dev,
115 struct device_attribute *attr,
116 char *buf)
117{
118 struct drm_device *ddev = dev_get_drvdata(dev);
119 struct amdgpu_device *adev = ddev->dev_private;
120 enum amd_pm_state_type pm;
121
122 if (adev->powerplay.pp_funcs->get_current_power_state)
123 pm = amdgpu_dpm_get_current_power_state(adev);
124 else
125 pm = adev->pm.dpm.user_state;
126
127 return snprintf(buf, PAGE_SIZE, "%s\n",
128 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
129 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
130}
131
132static ssize_t amdgpu_set_dpm_state(struct device *dev,
133 struct device_attribute *attr,
134 const char *buf,
135 size_t count)
136{
137 struct drm_device *ddev = dev_get_drvdata(dev);
138 struct amdgpu_device *adev = ddev->dev_private;
139 enum amd_pm_state_type state;
140
141 if (strncmp("battery", buf, strlen("battery")) == 0)
142 state = POWER_STATE_TYPE_BATTERY;
143 else if (strncmp("balanced", buf, strlen("balanced")) == 0)
144 state = POWER_STATE_TYPE_BALANCED;
145 else if (strncmp("performance", buf, strlen("performance")) == 0)
146 state = POWER_STATE_TYPE_PERFORMANCE;
147 else {
148 count = -EINVAL;
149 goto fail;
150 }
151
152 if (adev->powerplay.pp_funcs->dispatch_tasks) {
153 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
154 } else {
155 mutex_lock(&adev->pm.mutex);
156 adev->pm.dpm.user_state = state;
157 mutex_unlock(&adev->pm.mutex);
158
159
160 if (!(adev->flags & AMD_IS_PX) ||
161 (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
162 amdgpu_pm_compute_clocks(adev);
163 }
164fail:
165 return count;
166}
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
229 struct device_attribute *attr,
230 char *buf)
231{
232 struct drm_device *ddev = dev_get_drvdata(dev);
233 struct amdgpu_device *adev = ddev->dev_private;
234 enum amd_dpm_forced_level level = 0xff;
235
236 if ((adev->flags & AMD_IS_PX) &&
237 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
238 return snprintf(buf, PAGE_SIZE, "off\n");
239
240 if (adev->powerplay.pp_funcs->get_performance_level)
241 level = amdgpu_dpm_get_performance_level(adev);
242 else
243 level = adev->pm.dpm.forced_level;
244
245 return snprintf(buf, PAGE_SIZE, "%s\n",
246 (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
247 (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
248 (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
249 (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
250 (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
251 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
252 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
253 (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
254 "unknown");
255}
256
257static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
258 struct device_attribute *attr,
259 const char *buf,
260 size_t count)
261{
262 struct drm_device *ddev = dev_get_drvdata(dev);
263 struct amdgpu_device *adev = ddev->dev_private;
264 enum amd_dpm_forced_level level;
265 enum amd_dpm_forced_level current_level = 0xff;
266 int ret = 0;
267
268
269 if ((adev->flags & AMD_IS_PX) &&
270 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
271 return -EINVAL;
272
273 if (adev->powerplay.pp_funcs->get_performance_level)
274 current_level = amdgpu_dpm_get_performance_level(adev);
275
276 if (strncmp("low", buf, strlen("low")) == 0) {
277 level = AMD_DPM_FORCED_LEVEL_LOW;
278 } else if (strncmp("high", buf, strlen("high")) == 0) {
279 level = AMD_DPM_FORCED_LEVEL_HIGH;
280 } else if (strncmp("auto", buf, strlen("auto")) == 0) {
281 level = AMD_DPM_FORCED_LEVEL_AUTO;
282 } else if (strncmp("manual", buf, strlen("manual")) == 0) {
283 level = AMD_DPM_FORCED_LEVEL_MANUAL;
284 } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
285 level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
286 } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
287 level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
288 } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
289 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
290 } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
291 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
292 } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
293 level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
294 } else {
295 count = -EINVAL;
296 goto fail;
297 }
298
299 if (current_level == level)
300 return count;
301
302 if (adev->powerplay.pp_funcs->force_performance_level) {
303 mutex_lock(&adev->pm.mutex);
304 if (adev->pm.dpm.thermal_active) {
305 count = -EINVAL;
306 mutex_unlock(&adev->pm.mutex);
307 goto fail;
308 }
309 ret = amdgpu_dpm_force_performance_level(adev, level);
310 if (ret)
311 count = -EINVAL;
312 else
313 adev->pm.dpm.forced_level = level;
314 mutex_unlock(&adev->pm.mutex);
315 }
316
317fail:
318 return count;
319}
320
321static ssize_t amdgpu_get_pp_num_states(struct device *dev,
322 struct device_attribute *attr,
323 char *buf)
324{
325 struct drm_device *ddev = dev_get_drvdata(dev);
326 struct amdgpu_device *adev = ddev->dev_private;
327 struct pp_states_info data;
328 int i, buf_len;
329
330 if (adev->powerplay.pp_funcs->get_pp_num_states)
331 amdgpu_dpm_get_pp_num_states(adev, &data);
332
333 buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
334 for (i = 0; i < data.nums; i++)
335 buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
336 (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
337 (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
338 (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
339 (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");
340
341 return buf_len;
342}
343
344static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
345 struct device_attribute *attr,
346 char *buf)
347{
348 struct drm_device *ddev = dev_get_drvdata(dev);
349 struct amdgpu_device *adev = ddev->dev_private;
350 struct pp_states_info data;
351 enum amd_pm_state_type pm = 0;
352 int i = 0;
353
354 if (adev->powerplay.pp_funcs->get_current_power_state
355 && adev->powerplay.pp_funcs->get_pp_num_states) {
356 pm = amdgpu_dpm_get_current_power_state(adev);
357 amdgpu_dpm_get_pp_num_states(adev, &data);
358
359 for (i = 0; i < data.nums; i++) {
360 if (pm == data.states[i])
361 break;
362 }
363
364 if (i == data.nums)
365 i = -EINVAL;
366 }
367
368 return snprintf(buf, PAGE_SIZE, "%d\n", i);
369}
370
371static ssize_t amdgpu_get_pp_force_state(struct device *dev,
372 struct device_attribute *attr,
373 char *buf)
374{
375 struct drm_device *ddev = dev_get_drvdata(dev);
376 struct amdgpu_device *adev = ddev->dev_private;
377
378 if (adev->pp_force_state_enabled)
379 return amdgpu_get_pp_cur_state(dev, attr, buf);
380 else
381 return snprintf(buf, PAGE_SIZE, "\n");
382}
383
/* Store handler for pp_force_state.
 *
 * Writing a bare newline (a single character) disables state forcing.
 * Otherwise the value is parsed as an index into the power-state table;
 * the index is validated against the table capacity and sanitized with
 * array_index_nospec() BEFORE being used, to block Spectre-v1 style
 * speculative out-of-bounds reads.  Boot/default states cannot be forced.
 * Returns @count on success (including silently ignored writes when the
 * backend lacks support), -EINVAL on a malformed or out-of-range index. */
static ssize_t amdgpu_set_pp_force_state(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state = 0;
	unsigned long idx;
	int ret;

	if (strlen(buf) == 1)
		adev->pp_force_state_enabled = false;
	else if (adev->powerplay.pp_funcs->dispatch_tasks &&
			adev->powerplay.pp_funcs->get_pp_num_states) {
		struct pp_states_info data;

		ret = kstrtoul(buf, 0, &idx);
		if (ret || idx >= ARRAY_SIZE(data.states)) {
			count = -EINVAL;
			goto fail;
		}
		/* bound the index against speculative execution */
		idx = array_index_nospec(idx, ARRAY_SIZE(data.states));

		amdgpu_dpm_get_pp_num_states(adev, &data);
		state = data.states[idx];

		/* only user-selectable states may be forced */
		if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
		    state != POWER_STATE_TYPE_DEFAULT) {
			amdgpu_dpm_dispatch_task(adev,
					AMD_PP_TASK_ENABLE_USER_STATE, &state);
			adev->pp_force_state_enabled = true;
		}
	}
fail:
	return count;
}
421
422
423
424
425
426
427
428
429
430
431
432
433static ssize_t amdgpu_get_pp_table(struct device *dev,
434 struct device_attribute *attr,
435 char *buf)
436{
437 struct drm_device *ddev = dev_get_drvdata(dev);
438 struct amdgpu_device *adev = ddev->dev_private;
439 char *table = NULL;
440 int size;
441
442 if (adev->powerplay.pp_funcs->get_pp_table)
443 size = amdgpu_dpm_get_pp_table(adev, &table);
444 else
445 return 0;
446
447 if (size >= PAGE_SIZE)
448 size = PAGE_SIZE - 1;
449
450 memcpy(buf, table, size);
451
452 return size;
453}
454
455static ssize_t amdgpu_set_pp_table(struct device *dev,
456 struct device_attribute *attr,
457 const char *buf,
458 size_t count)
459{
460 struct drm_device *ddev = dev_get_drvdata(dev);
461 struct amdgpu_device *adev = ddev->dev_private;
462
463 if (adev->powerplay.pp_funcs->set_pp_table)
464 amdgpu_dpm_set_pp_table(adev, buf, count);
465
466 return count;
467}
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
496 struct device_attribute *attr,
497 const char *buf,
498 size_t count)
499{
500 struct drm_device *ddev = dev_get_drvdata(dev);
501 struct amdgpu_device *adev = ddev->dev_private;
502 int ret;
503 uint32_t parameter_size = 0;
504 long parameter[64];
505 char buf_cpy[128];
506 char *tmp_str;
507 char *sub_str;
508 const char delimiter[3] = {' ', '\n', '\0'};
509 uint32_t type;
510
511 if (count > 127)
512 return -EINVAL;
513
514 if (*buf == 's')
515 type = PP_OD_EDIT_SCLK_VDDC_TABLE;
516 else if (*buf == 'm')
517 type = PP_OD_EDIT_MCLK_VDDC_TABLE;
518 else if(*buf == 'r')
519 type = PP_OD_RESTORE_DEFAULT_TABLE;
520 else if (*buf == 'c')
521 type = PP_OD_COMMIT_DPM_TABLE;
522 else
523 return -EINVAL;
524
525 memcpy(buf_cpy, buf, count+1);
526
527 tmp_str = buf_cpy;
528
529 while (isspace(*++tmp_str));
530
531 while (tmp_str[0]) {
532 sub_str = strsep(&tmp_str, delimiter);
533 ret = kstrtol(sub_str, 0, ¶meter[parameter_size]);
534 if (ret)
535 return -EINVAL;
536 parameter_size++;
537
538 while (isspace(*tmp_str))
539 tmp_str++;
540 }
541
542 if (adev->powerplay.pp_funcs->odn_edit_dpm_table)
543 ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
544 parameter, parameter_size);
545
546 if (ret)
547 return -EINVAL;
548
549 if (type == PP_OD_COMMIT_DPM_TABLE) {
550 if (adev->powerplay.pp_funcs->dispatch_tasks) {
551 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
552 return count;
553 } else {
554 return -EINVAL;
555 }
556 }
557
558 return count;
559}
560
561static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
562 struct device_attribute *attr,
563 char *buf)
564{
565 struct drm_device *ddev = dev_get_drvdata(dev);
566 struct amdgpu_device *adev = ddev->dev_private;
567 uint32_t size = 0;
568
569 if (adev->powerplay.pp_funcs->print_clock_levels) {
570 size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
571 size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
572 size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
573 return size;
574 } else {
575 return snprintf(buf, PAGE_SIZE, "\n");
576 }
577
578}
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
598 struct device_attribute *attr,
599 char *buf)
600{
601 struct drm_device *ddev = dev_get_drvdata(dev);
602 struct amdgpu_device *adev = ddev->dev_private;
603
604 if (adev->powerplay.pp_funcs->print_clock_levels)
605 return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
606 else
607 return snprintf(buf, PAGE_SIZE, "\n");
608}
609
610
611
612
613
614#define AMDGPU_MASK_BUF_MAX (32 * 13)
615
616static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
617{
618 int ret;
619 long level;
620 char *sub_str = NULL;
621 char *tmp;
622 char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
623 const char delimiter[3] = {' ', '\n', '\0'};
624 size_t bytes;
625
626 *mask = 0;
627
628 bytes = min(count, sizeof(buf_cpy) - 1);
629 memcpy(buf_cpy, buf, bytes);
630 buf_cpy[bytes] = '\0';
631 tmp = buf_cpy;
632 while (tmp[0]) {
633 sub_str = strsep(&tmp, delimiter);
634 if (strlen(sub_str)) {
635 ret = kstrtol(sub_str, 0, &level);
636 if (ret)
637 return -EINVAL;
638 *mask |= 1 << level;
639 } else
640 break;
641 }
642
643 return 0;
644}
645
646static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
647 struct device_attribute *attr,
648 const char *buf,
649 size_t count)
650{
651 struct drm_device *ddev = dev_get_drvdata(dev);
652 struct amdgpu_device *adev = ddev->dev_private;
653 int ret;
654 uint32_t mask = 0;
655
656 ret = amdgpu_read_mask(buf, count, &mask);
657 if (ret)
658 return ret;
659
660 if (adev->powerplay.pp_funcs->force_clock_level)
661 amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
662
663 return count;
664}
665
666static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
667 struct device_attribute *attr,
668 char *buf)
669{
670 struct drm_device *ddev = dev_get_drvdata(dev);
671 struct amdgpu_device *adev = ddev->dev_private;
672
673 if (adev->powerplay.pp_funcs->print_clock_levels)
674 return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
675 else
676 return snprintf(buf, PAGE_SIZE, "\n");
677}
678
679static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
680 struct device_attribute *attr,
681 const char *buf,
682 size_t count)
683{
684 struct drm_device *ddev = dev_get_drvdata(dev);
685 struct amdgpu_device *adev = ddev->dev_private;
686 int ret;
687 uint32_t mask = 0;
688
689 ret = amdgpu_read_mask(buf, count, &mask);
690 if (ret)
691 return ret;
692
693 if (adev->powerplay.pp_funcs->force_clock_level)
694 amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
695
696 return count;
697}
698
699static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
700 struct device_attribute *attr,
701 char *buf)
702{
703 struct drm_device *ddev = dev_get_drvdata(dev);
704 struct amdgpu_device *adev = ddev->dev_private;
705
706 if (adev->powerplay.pp_funcs->print_clock_levels)
707 return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
708 else
709 return snprintf(buf, PAGE_SIZE, "\n");
710}
711
712static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
713 struct device_attribute *attr,
714 const char *buf,
715 size_t count)
716{
717 struct drm_device *ddev = dev_get_drvdata(dev);
718 struct amdgpu_device *adev = ddev->dev_private;
719 int ret;
720 uint32_t mask = 0;
721
722 ret = amdgpu_read_mask(buf, count, &mask);
723 if (ret)
724 return ret;
725
726 if (adev->powerplay.pp_funcs->force_clock_level)
727 amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
728
729 return count;
730}
731
732static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
733 struct device_attribute *attr,
734 char *buf)
735{
736 struct drm_device *ddev = dev_get_drvdata(dev);
737 struct amdgpu_device *adev = ddev->dev_private;
738 uint32_t value = 0;
739
740 if (adev->powerplay.pp_funcs->get_sclk_od)
741 value = amdgpu_dpm_get_sclk_od(adev);
742
743 return snprintf(buf, PAGE_SIZE, "%d\n", value);
744}
745
746static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
747 struct device_attribute *attr,
748 const char *buf,
749 size_t count)
750{
751 struct drm_device *ddev = dev_get_drvdata(dev);
752 struct amdgpu_device *adev = ddev->dev_private;
753 int ret;
754 long int value;
755
756 ret = kstrtol(buf, 0, &value);
757
758 if (ret) {
759 count = -EINVAL;
760 goto fail;
761 }
762 if (adev->powerplay.pp_funcs->set_sclk_od)
763 amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
764
765 if (adev->powerplay.pp_funcs->dispatch_tasks) {
766 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
767 } else {
768 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
769 amdgpu_pm_compute_clocks(adev);
770 }
771
772fail:
773 return count;
774}
775
776static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
777 struct device_attribute *attr,
778 char *buf)
779{
780 struct drm_device *ddev = dev_get_drvdata(dev);
781 struct amdgpu_device *adev = ddev->dev_private;
782 uint32_t value = 0;
783
784 if (adev->powerplay.pp_funcs->get_mclk_od)
785 value = amdgpu_dpm_get_mclk_od(adev);
786
787 return snprintf(buf, PAGE_SIZE, "%d\n", value);
788}
789
790static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
791 struct device_attribute *attr,
792 const char *buf,
793 size_t count)
794{
795 struct drm_device *ddev = dev_get_drvdata(dev);
796 struct amdgpu_device *adev = ddev->dev_private;
797 int ret;
798 long int value;
799
800 ret = kstrtol(buf, 0, &value);
801
802 if (ret) {
803 count = -EINVAL;
804 goto fail;
805 }
806 if (adev->powerplay.pp_funcs->set_mclk_od)
807 amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
808
809 if (adev->powerplay.pp_funcs->dispatch_tasks) {
810 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
811 } else {
812 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
813 amdgpu_pm_compute_clocks(adev);
814 }
815
816fail:
817 return count;
818}
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
841 struct device_attribute *attr,
842 char *buf)
843{
844 struct drm_device *ddev = dev_get_drvdata(dev);
845 struct amdgpu_device *adev = ddev->dev_private;
846
847 if (adev->powerplay.pp_funcs->get_power_profile_mode)
848 return amdgpu_dpm_get_power_profile_mode(adev, buf);
849
850 return snprintf(buf, PAGE_SIZE, "\n");
851}
852
853
854static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
855 struct device_attribute *attr,
856 const char *buf,
857 size_t count)
858{
859 int ret = 0xff;
860 struct drm_device *ddev = dev_get_drvdata(dev);
861 struct amdgpu_device *adev = ddev->dev_private;
862 uint32_t parameter_size = 0;
863 long parameter[64];
864 char *sub_str, buf_cpy[128];
865 char *tmp_str;
866 uint32_t i = 0;
867 char tmp[2];
868 long int profile_mode = 0;
869 const char delimiter[3] = {' ', '\n', '\0'};
870
871 tmp[0] = *(buf);
872 tmp[1] = '\0';
873 ret = kstrtol(tmp, 0, &profile_mode);
874 if (ret)
875 goto fail;
876
877 if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
878 if (count < 2 || count > 127)
879 return -EINVAL;
880 while (isspace(*++buf))
881 i++;
882 memcpy(buf_cpy, buf, count-i);
883 tmp_str = buf_cpy;
884 while (tmp_str[0]) {
885 sub_str = strsep(&tmp_str, delimiter);
886 ret = kstrtol(sub_str, 0, ¶meter[parameter_size]);
887 if (ret) {
888 count = -EINVAL;
889 goto fail;
890 }
891 parameter_size++;
892 while (isspace(*tmp_str))
893 tmp_str++;
894 }
895 }
896 parameter[parameter_size] = profile_mode;
897 if (adev->powerplay.pp_funcs->set_power_profile_mode)
898 ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
899
900 if (!ret)
901 return count;
902fail:
903 return -EINVAL;
904}
905
906
907
908
909
910
911
912
913
914static ssize_t amdgpu_get_busy_percent(struct device *dev,
915 struct device_attribute *attr,
916 char *buf)
917{
918 struct drm_device *ddev = dev_get_drvdata(dev);
919 struct amdgpu_device *adev = ddev->dev_private;
920 int r, value, size = sizeof(value);
921
922
923 if (!(adev->powerplay.pp_funcs &&
924 adev->powerplay.pp_funcs->read_sensor))
925 return -EINVAL;
926
927
928 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
929 (void *)&value, &size);
930 if (r)
931 return r;
932
933 return snprintf(buf, PAGE_SIZE, "%d\n", value);
934}
935
/* sysfs attributes exposing the dpm/powerplay interface under the
 * device directory; the show/store handlers are defined above. */
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		   amdgpu_get_dpm_forced_performance_level,
		   amdgpu_set_dpm_forced_performance_level);
static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL);
static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL);
static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_force_state,
		amdgpu_set_pp_force_state);
static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_table,
		amdgpu_set_pp_table);
static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_sclk,
		amdgpu_set_pp_dpm_sclk);
static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_mclk,
		amdgpu_set_pp_dpm_mclk);
static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_pcie,
		amdgpu_set_pp_dpm_pcie);
static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_sclk_od,
		amdgpu_set_pp_sclk_od);
static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_mclk_od,
		amdgpu_set_pp_mclk_od);
static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_power_profile_mode,
		amdgpu_set_pp_power_profile_mode);
static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_od_clk_voltage,
		amdgpu_set_pp_od_clk_voltage);
static DEVICE_ATTR(gpu_busy_percent, S_IRUGO,
		amdgpu_get_busy_percent, NULL);
971
972static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
973 struct device_attribute *attr,
974 char *buf)
975{
976 struct amdgpu_device *adev = dev_get_drvdata(dev);
977 struct drm_device *ddev = adev->ddev;
978 int r, temp, size = sizeof(temp);
979
980
981 if ((adev->flags & AMD_IS_PX) &&
982 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
983 return -EINVAL;
984
985
986 if (!(adev->powerplay.pp_funcs &&
987 adev->powerplay.pp_funcs->read_sensor))
988 return -EINVAL;
989
990
991 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
992 (void *)&temp, &size);
993 if (r)
994 return r;
995
996 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
997}
998
999static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
1000 struct device_attribute *attr,
1001 char *buf)
1002{
1003 struct amdgpu_device *adev = dev_get_drvdata(dev);
1004 int hyst = to_sensor_dev_attr(attr)->index;
1005 int temp;
1006
1007 if (hyst)
1008 temp = adev->pm.dpm.thermal.min_temp;
1009 else
1010 temp = adev->pm.dpm.thermal.max_temp;
1011
1012 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
1013}
1014
1015static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
1016 struct device_attribute *attr,
1017 char *buf)
1018{
1019 struct amdgpu_device *adev = dev_get_drvdata(dev);
1020 u32 pwm_mode = 0;
1021
1022 if (!adev->powerplay.pp_funcs->get_fan_control_mode)
1023 return -EINVAL;
1024
1025 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
1026
1027 return sprintf(buf, "%i\n", pwm_mode);
1028}
1029
1030static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
1031 struct device_attribute *attr,
1032 const char *buf,
1033 size_t count)
1034{
1035 struct amdgpu_device *adev = dev_get_drvdata(dev);
1036 int err;
1037 int value;
1038
1039
1040 if ((adev->flags & AMD_IS_PX) &&
1041 (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1042 return -EINVAL;
1043
1044 if (!adev->powerplay.pp_funcs->set_fan_control_mode)
1045 return -EINVAL;
1046
1047 err = kstrtoint(buf, 10, &value);
1048 if (err)
1049 return err;
1050
1051 amdgpu_dpm_set_fan_control_mode(adev, value);
1052
1053 return count;
1054}
1055
1056static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
1057 struct device_attribute *attr,
1058 char *buf)
1059{
1060 return sprintf(buf, "%i\n", 0);
1061}
1062
1063static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
1064 struct device_attribute *attr,
1065 char *buf)
1066{
1067 return sprintf(buf, "%i\n", 255);
1068}
1069
1070static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
1071 struct device_attribute *attr,
1072 const char *buf, size_t count)
1073{
1074 struct amdgpu_device *adev = dev_get_drvdata(dev);
1075 int err;
1076 u32 value;
1077
1078
1079 if ((adev->flags & AMD_IS_PX) &&
1080 (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1081 return -EINVAL;
1082
1083 err = kstrtou32(buf, 10, &value);
1084 if (err)
1085 return err;
1086
1087 value = (value * 100) / 255;
1088
1089 if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
1090 err = amdgpu_dpm_set_fan_speed_percent(adev, value);
1091 if (err)
1092 return err;
1093 }
1094
1095 return count;
1096}
1097
1098static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
1099 struct device_attribute *attr,
1100 char *buf)
1101{
1102 struct amdgpu_device *adev = dev_get_drvdata(dev);
1103 int err;
1104 u32 speed = 0;
1105
1106
1107 if ((adev->flags & AMD_IS_PX) &&
1108 (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1109 return -EINVAL;
1110
1111 if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
1112 err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
1113 if (err)
1114 return err;
1115 }
1116
1117 speed = (speed * 255) / 100;
1118
1119 return sprintf(buf, "%i\n", speed);
1120}
1121
1122static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
1123 struct device_attribute *attr,
1124 char *buf)
1125{
1126 struct amdgpu_device *adev = dev_get_drvdata(dev);
1127 int err;
1128 u32 speed = 0;
1129
1130
1131 if ((adev->flags & AMD_IS_PX) &&
1132 (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1133 return -EINVAL;
1134
1135 if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
1136 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
1137 if (err)
1138 return err;
1139 }
1140
1141 return sprintf(buf, "%i\n", speed);
1142}
1143
1144static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
1145 struct device_attribute *attr,
1146 char *buf)
1147{
1148 struct amdgpu_device *adev = dev_get_drvdata(dev);
1149 struct drm_device *ddev = adev->ddev;
1150 u32 vddgfx;
1151 int r, size = sizeof(vddgfx);
1152
1153
1154 if ((adev->flags & AMD_IS_PX) &&
1155 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1156 return -EINVAL;
1157
1158
1159 if (!(adev->powerplay.pp_funcs &&
1160 adev->powerplay.pp_funcs->read_sensor))
1161 return -EINVAL;
1162
1163
1164 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
1165 (void *)&vddgfx, &size);
1166 if (r)
1167 return r;
1168
1169 return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx);
1170}
1171
1172static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
1173 struct device_attribute *attr,
1174 char *buf)
1175{
1176 return snprintf(buf, PAGE_SIZE, "vddgfx\n");
1177}
1178
1179static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
1180 struct device_attribute *attr,
1181 char *buf)
1182{
1183 struct amdgpu_device *adev = dev_get_drvdata(dev);
1184 struct drm_device *ddev = adev->ddev;
1185 u32 vddnb;
1186 int r, size = sizeof(vddnb);
1187
1188
1189 if (!(adev->flags & AMD_IS_APU))
1190 return -EINVAL;
1191
1192
1193 if ((adev->flags & AMD_IS_PX) &&
1194 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1195 return -EINVAL;
1196
1197
1198 if (!(adev->powerplay.pp_funcs &&
1199 adev->powerplay.pp_funcs->read_sensor))
1200 return -EINVAL;
1201
1202
1203 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
1204 (void *)&vddnb, &size);
1205 if (r)
1206 return r;
1207
1208 return snprintf(buf, PAGE_SIZE, "%d\n", vddnb);
1209}
1210
1211static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
1212 struct device_attribute *attr,
1213 char *buf)
1214{
1215 return snprintf(buf, PAGE_SIZE, "vddnb\n");
1216}
1217
1218static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
1219 struct device_attribute *attr,
1220 char *buf)
1221{
1222 struct amdgpu_device *adev = dev_get_drvdata(dev);
1223 struct drm_device *ddev = adev->ddev;
1224 u32 query = 0;
1225 int r, size = sizeof(u32);
1226 unsigned uw;
1227
1228
1229 if ((adev->flags & AMD_IS_PX) &&
1230 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1231 return -EINVAL;
1232
1233
1234 if (!(adev->powerplay.pp_funcs &&
1235 adev->powerplay.pp_funcs->read_sensor))
1236 return -EINVAL;
1237
1238
1239 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
1240 (void *)&query, &size);
1241 if (r)
1242 return r;
1243
1244
1245 uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
1246
1247 return snprintf(buf, PAGE_SIZE, "%u\n", uw);
1248}
1249
1250static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
1251 struct device_attribute *attr,
1252 char *buf)
1253{
1254 return sprintf(buf, "%i\n", 0);
1255}
1256
1257static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
1258 struct device_attribute *attr,
1259 char *buf)
1260{
1261 struct amdgpu_device *adev = dev_get_drvdata(dev);
1262 uint32_t limit = 0;
1263
1264 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
1265 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
1266 return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
1267 } else {
1268 return snprintf(buf, PAGE_SIZE, "\n");
1269 }
1270}
1271
1272static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
1273 struct device_attribute *attr,
1274 char *buf)
1275{
1276 struct amdgpu_device *adev = dev_get_drvdata(dev);
1277 uint32_t limit = 0;
1278
1279 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
1280 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
1281 return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
1282 } else {
1283 return snprintf(buf, PAGE_SIZE, "\n");
1284 }
1285}
1286
1287
1288static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
1289 struct device_attribute *attr,
1290 const char *buf,
1291 size_t count)
1292{
1293 struct amdgpu_device *adev = dev_get_drvdata(dev);
1294 int err;
1295 u32 value;
1296
1297 err = kstrtou32(buf, 10, &value);
1298 if (err)
1299 return err;
1300
1301 value = value / 1000000;
1302 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) {
1303 err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
1304 if (err)
1305 return err;
1306 } else {
1307 return -EINVAL;
1308 }
1309
1310 return count;
1311}
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
/*
 * hwmon sysfs attributes:
 *  temp1_*      - GPU temperature and critical thresholds
 *  pwm1*        - fan PWM value, control mode, and min/max range
 *  fan1_input   - fan speed
 *  in0_*        - vddgfx voltage; in1_* - vddnb voltage (APU only, see
 *                 hwmon_attributes_visible)
 *  power1_*     - average power draw and programmable power cap
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
1385
/* All hwmon attributes; per-device filtering happens in
 * hwmon_attributes_visible(), so this list is the superset. */
static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	&sensor_dev_attr_fan1_input.dev_attr.attr,
	&sensor_dev_attr_in0_input.dev_attr.attr,
	&sensor_dev_attr_in0_label.dev_attr.attr,
	&sensor_dev_attr_in1_input.dev_attr.attr,
	&sensor_dev_attr_in1_label.dev_attr.attr,
	&sensor_dev_attr_power1_average.dev_attr.attr,
	&sensor_dev_attr_power1_cap_max.dev_attr.attr,
	&sensor_dev_attr_power1_cap_min.dev_attr.attr,
	&sensor_dev_attr_power1_cap.dev_attr.attr,
	NULL
};
1405
/*
 * hwmon_attributes_visible - sysfs is_visible callback for the hwmon group.
 *
 * Returns 0 to hide an attribute entirely, otherwise the attribute's mode
 * with unsupported read/write bits masked off.
 */
static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	/* Boards without a fan get none of the fan/pwm attributes. */
	if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_input.dev_attr.attr))
		return 0;

	/* Thermal thresholds and fan control need DPM to be enabled. */
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* Drop the read bit when the backend can't report fan state.
	 * NOTE(review): pp_funcs is dereferenced without a NULL check here,
	 * unlike the show/store handlers above — confirm pp_funcs is always
	 * set by the time the hwmon device registers. */
	if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) ||
	    (!adev->powerplay.pp_funcs->get_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr))
		effective_mode &= ~S_IRUGO;

	/* Drop the write bit when the backend can't program fan state. */
	if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) ||
	    (!adev->powerplay.pp_funcs->set_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr))
		effective_mode &= ~S_IWUSR;

	/* APUs expose no power cap attributes. */
	if ((adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
	     attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
		return 0;

	/* Hide the pwm range when fan speed can be neither read nor set. */
	if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
	     !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* vddnb (in1) is only shown on APUs. */
	if (!(adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_in1_label.dev_attr.attr))
		return 0;

	return effective_mode;
}
1466
/* Attribute group registered with hwmon_device_register_with_groups(). */
static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};
1476
1477void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
1478{
1479 struct amdgpu_device *adev =
1480 container_of(work, struct amdgpu_device,
1481 pm.dpm.thermal.work);
1482
1483 enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
1484 int temp, size = sizeof(temp);
1485
1486 if (!adev->pm.dpm_enabled)
1487 return;
1488
1489 if (adev->powerplay.pp_funcs &&
1490 adev->powerplay.pp_funcs->read_sensor &&
1491 !amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
1492 (void *)&temp, &size)) {
1493 if (temp < adev->pm.dpm.thermal.min_temp)
1494
1495 dpm_state = adev->pm.dpm.user_state;
1496 } else {
1497 if (adev->pm.dpm.thermal.high_to_low)
1498
1499 dpm_state = adev->pm.dpm.user_state;
1500 }
1501 mutex_lock(&adev->pm.mutex);
1502 if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
1503 adev->pm.dpm.thermal_active = true;
1504 else
1505 adev->pm.dpm.thermal_active = false;
1506 adev->pm.dpm.state = dpm_state;
1507 mutex_unlock(&adev->pm.mutex);
1508
1509 amdgpu_pm_compute_clocks(adev);
1510}
1511
/*
 * amdgpu_dpm_pick_power_state - select the legacy-DPM power state best
 * matching the requested state type.
 *
 * Scans adev->pm.dpm.ps[] for a state whose ATOM classification matches
 * @dpm_state.  If no state matches, @dpm_state is degraded through a
 * fallback chain (e.g. UVD_SD -> UVD_HD -> performance) and the scan is
 * restarted.  Returns NULL only when even the fallbacks find nothing.
 */
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* Single-display-only states are disqualified when the vblank
	 * period is too short. */
	if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* A "performance" request first looks for the dedicated 3D
	 * performance class; the fallback chain below returns it to plain
	 * performance when no such state exists. */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;

	/* "balanced" requests are served by performance states. */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Scan all states for one matching the (possibly remapped) type. */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user-visible states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* No match: degrade the request and search again. */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}
1645
/*
 * amdgpu_dpm_change_power_state_locked - apply the requested power state.
 *
 * Picks the best matching power state, skips the transition when the
 * backend reports the new state equal to the current one, and re-applies
 * the forced performance level afterwards.  Caller holds pm.mutex (see
 * amdgpu_pm_compute_clocks).
 */
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;
	bool equal = false;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* thermal and UVD overrides take precedence over the
		 * user's request */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	/* verbose state dump when legacy dpm was forced on the cmdline */
	if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;
	if (adev->powerplay.pp_funcs->display_configuration_changed)
		amdgpu_dpm_display_configuration_changed(adev);

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		return;

	/* a failed equality check is treated as "not equal" so the
	 * transition still happens */
	if (adev->powerplay.pp_funcs->check_state_equal) {
		if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
			equal = false;
	}

	if (equal)
		return;

	amdgpu_dpm_set_power_state(adev);
	amdgpu_dpm_post_set_power_state(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (adev->powerplay.pp_funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
			/* save the user's level so it can be restored later */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, re-apply the user's selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}
}
1714
1715void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
1716{
1717 if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
1718
1719 mutex_lock(&adev->pm.mutex);
1720 amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
1721 mutex_unlock(&adev->pm.mutex);
1722 } else {
1723 if (enable) {
1724 mutex_lock(&adev->pm.mutex);
1725 adev->pm.dpm.uvd_active = true;
1726 adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
1727 mutex_unlock(&adev->pm.mutex);
1728 } else {
1729 mutex_lock(&adev->pm.mutex);
1730 adev->pm.dpm.uvd_active = false;
1731 mutex_unlock(&adev->pm.mutex);
1732 }
1733 amdgpu_pm_compute_clocks(adev);
1734 }
1735}
1736
/*
 * amdgpu_dpm_enable_vce - power VCE up or down.
 *
 * With an SMU powergating hook the SMU does the work directly (note the
 * inverted sense: gating the block disables it).  On the legacy path,
 * clock gating is ungated before power gating on enable, and the two are
 * re-gated in the opposite order on disable.
 */
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
		/* enable/disable VCE */
		mutex_lock(&adev->pm.mutex);
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
		mutex_unlock(&adev->pm.mutex);
	} else {
		if (enable) {
			mutex_lock(&adev->pm.mutex);
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
			mutex_unlock(&adev->pm.mutex);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_UNGATE);
			amdgpu_pm_compute_clocks(adev);
		} else {
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_GATE);
			mutex_lock(&adev->pm.mutex);
			adev->pm.dpm.vce_active = false;
			mutex_unlock(&adev->pm.mutex);
			amdgpu_pm_compute_clocks(adev);
		}

	}
}
1769
1770void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
1771{
1772 int i;
1773
1774 if (adev->powerplay.pp_funcs->print_power_state == NULL)
1775 return;
1776
1777 for (i = 0; i < adev->pm.dpm.num_ps; i++)
1778 amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
1779
1780}
1781
1782int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
1783{
1784 int ret;
1785
1786 if (adev->pm.sysfs_initialized)
1787 return 0;
1788
1789 if (adev->pm.dpm_enabled == 0)
1790 return 0;
1791
1792 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
1793 DRIVER_NAME, adev,
1794 hwmon_groups);
1795 if (IS_ERR(adev->pm.int_hwmon_dev)) {
1796 ret = PTR_ERR(adev->pm.int_hwmon_dev);
1797 dev_err(adev->dev,
1798 "Unable to register hwmon device: %d\n", ret);
1799 return ret;
1800 }
1801
1802 ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
1803 if (ret) {
1804 DRM_ERROR("failed to create device file for dpm state\n");
1805 return ret;
1806 }
1807 ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
1808 if (ret) {
1809 DRM_ERROR("failed to create device file for dpm state\n");
1810 return ret;
1811 }
1812
1813
1814 ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
1815 if (ret) {
1816 DRM_ERROR("failed to create device file pp_num_states\n");
1817 return ret;
1818 }
1819 ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
1820 if (ret) {
1821 DRM_ERROR("failed to create device file pp_cur_state\n");
1822 return ret;
1823 }
1824 ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
1825 if (ret) {
1826 DRM_ERROR("failed to create device file pp_force_state\n");
1827 return ret;
1828 }
1829 ret = device_create_file(adev->dev, &dev_attr_pp_table);
1830 if (ret) {
1831 DRM_ERROR("failed to create device file pp_table\n");
1832 return ret;
1833 }
1834
1835 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
1836 if (ret) {
1837 DRM_ERROR("failed to create device file pp_dpm_sclk\n");
1838 return ret;
1839 }
1840 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
1841 if (ret) {
1842 DRM_ERROR("failed to create device file pp_dpm_mclk\n");
1843 return ret;
1844 }
1845 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
1846 if (ret) {
1847 DRM_ERROR("failed to create device file pp_dpm_pcie\n");
1848 return ret;
1849 }
1850 ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
1851 if (ret) {
1852 DRM_ERROR("failed to create device file pp_sclk_od\n");
1853 return ret;
1854 }
1855 ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od);
1856 if (ret) {
1857 DRM_ERROR("failed to create device file pp_mclk_od\n");
1858 return ret;
1859 }
1860 ret = device_create_file(adev->dev,
1861 &dev_attr_pp_power_profile_mode);
1862 if (ret) {
1863 DRM_ERROR("failed to create device file "
1864 "pp_power_profile_mode\n");
1865 return ret;
1866 }
1867 ret = device_create_file(adev->dev,
1868 &dev_attr_pp_od_clk_voltage);
1869 if (ret) {
1870 DRM_ERROR("failed to create device file "
1871 "pp_od_clk_voltage\n");
1872 return ret;
1873 }
1874 ret = device_create_file(adev->dev,
1875 &dev_attr_gpu_busy_percent);
1876 if (ret) {
1877 DRM_ERROR("failed to create device file "
1878 "gpu_busy_level\n");
1879 return ret;
1880 }
1881 ret = amdgpu_debugfs_pm_init(adev);
1882 if (ret) {
1883 DRM_ERROR("Failed to register debugfs file for dpm!\n");
1884 return ret;
1885 }
1886
1887 adev->pm.sysfs_initialized = true;
1888
1889 return 0;
1890}
1891
/*
 * amdgpu_pm_sysfs_fini - tear down everything amdgpu_pm_sysfs_init
 * registered: the hwmon device and every PM sysfs file.  A no-op when
 * DPM is disabled (init registered nothing in that case either).
 */
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled == 0)
		return;

	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);
	device_remove_file(adev->dev, &dev_attr_power_dpm_state);
	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);

	device_remove_file(adev->dev, &dev_attr_pp_num_states);
	device_remove_file(adev->dev, &dev_attr_pp_cur_state);
	device_remove_file(adev->dev, &dev_attr_pp_force_state);
	device_remove_file(adev->dev, &dev_attr_pp_table);

	device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
	device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
	device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
	device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
	device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
	device_remove_file(adev->dev,
			&dev_attr_pp_power_profile_mode);
	device_remove_file(adev->dev,
			&dev_attr_pp_od_clk_voltage);
	device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
}
1918
/*
 * amdgpu_pm_compute_clocks - re-evaluate clocks/power state after a
 * display or workload change.
 *
 * Updates display bandwidth, waits for all rings to go idle, then either
 * dispatches a display-config-change task to powerplay or drives the
 * legacy DPM state machine under pm.mutex.
 */
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	int i = 0;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	/* wait for all rings to drain before changing clocks */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->ready)
			amdgpu_fence_wait_empty(ring);
	}

	if (adev->powerplay.pp_funcs->dispatch_tasks) {
		/* without DC support, build the display config here */
		if (!amdgpu_device_has_dc_support(adev)) {
			mutex_lock(&adev->pm.mutex);
			amdgpu_dpm_get_active_displays(adev);
			adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
			adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
			adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
			/* above 120 Hz the vblank window is too small to use */
			if (adev->pm.pm_display_cfg.vrefresh > 120)
				adev->pm.pm_display_cfg.min_vblank_time = 0;
			if (adev->powerplay.pp_funcs->display_configuration_change)
				adev->powerplay.pp_funcs->display_configuration_change(
								adev->powerplay.pp_handle,
								&adev->pm.pm_display_cfg);
			mutex_unlock(&adev->pm.mutex);
		}
		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);
		amdgpu_dpm_get_active_displays(adev);
		amdgpu_dpm_change_power_state_locked(adev);
		mutex_unlock(&adev->pm.mutex);
	}
}
1959
1960
1961
1962
1963#if defined(CONFIG_DEBUG_FS)
1964
/*
 * amdgpu_debugfs_pm_info_pp - dump clocks, power, temperature, load and
 * UVD/VCE state through the powerplay sensor interface.
 *
 * Each sensor read that fails is silently skipped so the dump degrades
 * gracefully.  Returns -EINVAL when no sensor interface exists at all.
 */
static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
{
	uint32_t value;
	uint32_t query = 0;
	int size;

	/* sensor interface is mandatory for everything below */
	if (!(adev->powerplay.pp_funcs &&
	      adev->powerplay.pp_funcs->read_sensor))
		return -EINVAL;

	/* GPU Clocks */
	size = sizeof(value);
	seq_printf(m, "GFX Clocks and Power:\n");
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
		seq_printf(m, "\t%u mV (VDDGFX)\n", value);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
		seq_printf(m, "\t%u mV (VDDNB)\n", value);
	size = sizeof(uint32_t);
	/* power is packed: watts in the upper bits, remainder in low byte */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
		seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
	size = sizeof(value);
	seq_printf(m, "\n");

	/* GPU Temp */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
		seq_printf(m, "GPU Temperature: %u C\n", value/1000);

	/* GPU Load */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
		seq_printf(m, "GPU Load: %u %%\n", value);
	seq_printf(m, "\n");

	/* UVD clocks, only meaningful while the block is powered */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
		if (!value) {
			seq_printf(m, "UVD: Disabled\n");
		} else {
			seq_printf(m, "UVD: Enabled\n");
			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
				seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
				seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
		}
	}
	seq_printf(m, "\n");

	/* VCE clocks, only meaningful while the block is powered */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
		if (!value) {
			seq_printf(m, "VCE: Disabled\n");
		} else {
			seq_printf(m, "VCE: Enabled\n");
			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
				seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
		}
	}

	return 0;
}
2033
2034static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
2035{
2036 int i;
2037
2038 for (i = 0; clocks[i].flag; i++)
2039 seq_printf(m, "\t%s: %s\n", clocks[i].name,
2040 (flags & clocks[i].flag) ? "On" : "Off");
2041}
2042
2043static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
2044{
2045 struct drm_info_node *node = (struct drm_info_node *) m->private;
2046 struct drm_device *dev = node->minor->dev;
2047 struct amdgpu_device *adev = dev->dev_private;
2048 struct drm_device *ddev = adev->ddev;
2049 u32 flags = 0;
2050
2051 amdgpu_device_ip_get_clockgating_state(adev, &flags);
2052 seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
2053 amdgpu_parse_cg_state(m, flags);
2054 seq_printf(m, "\n");
2055
2056 if (!adev->pm.dpm_enabled) {
2057 seq_printf(m, "dpm not enabled\n");
2058 return 0;
2059 }
2060 if ((adev->flags & AMD_IS_PX) &&
2061 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
2062 seq_printf(m, "PX asic powered off\n");
2063 } else if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
2064 mutex_lock(&adev->pm.mutex);
2065 if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
2066 adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
2067 else
2068 seq_printf(m, "Debugfs support not implemented for this asic\n");
2069 mutex_unlock(&adev->pm.mutex);
2070 } else {
2071 return amdgpu_debugfs_pm_info_pp(m, adev);
2072 }
2073
2074 return 0;
2075}
2076
/* debugfs file table: the single amdgpu_pm_info entry. */
static const struct drm_info_list amdgpu_pm_info_list[] = {
	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
2080#endif
2081
/*
 * amdgpu_debugfs_pm_init - register the PM debugfs files.
 * Compiles to a successful no-op when CONFIG_DEBUG_FS is not set.
 */
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
#else
	return 0;
#endif
}
2090