#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_display.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include <linux/power_supply.h>
#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
#include "hwmgr.h"
#define WIDTH_4K 3840

static const struct cg_flag_name clocks[] = {
	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
	{AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},

	{AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
	{0, NULL},
};

static const struct hwmon_temp_label {
	enum PP_HWMON_TEMP channel;
	const char *label;
} temp_label[] = {
	{PP_TEMP_EDGE, "edge"},
	{PP_TEMP_JUNCTION, "junction"},
	{PP_TEMP_MEM, "mem"},
};

void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;
		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
		mutex_unlock(&adev->pm.mutex);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(&adev->smu);
	}
}

int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	int ret = 0;

	if (!data || !size)
		return -EINVAL;

	if (is_support_sw_smu(adev))
		ret = smu_read_sensor(&adev->smu, sensor, data, size);
	else {
		if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
			ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle,
								    sensor, data, size);
		else
			ret = -EINVAL;
	}

	return ret;
}
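
/*
 * DOC: power_dpm_state
 *
 * Legacy sysfs interface for adjusting the preferred power state, kept
 * for backwards compatibility.  Writing "battery", "balanced" or
 * "performance" requests that state; reading returns the current (or
 * user requested) state.  On modern hardware the
 * power_dpm_force_performance_level interface below is the preferred
 * control.  (Summary comment added by the editor, describing the two
 * handlers that follow.)
 */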
static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type pm;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		if (adev->smu.ppt_funcs->get_current_power_state)
			pm = smu_get_current_power_state(&adev->smu);
		else
			pm = adev->pm.dpm.user_state;
	} else if (adev->powerplay.pp_funcs->get_current_power_state) {
		pm = amdgpu_dpm_get_current_power_state(adev);
	} else {
		pm = adev->pm.dpm.user_state;
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	else
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);
	} else if (adev->powerplay.pp_funcs->dispatch_tasks) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);

		amdgpu_pm_compute_clocks(adev);
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
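
/*
 * DOC: power_dpm_force_performance_level
 *
 * Sysfs interface for forcing the DPM performance level.  Reading
 * returns the current level; writing selects one of "auto", "low",
 * "high", "manual", "profile_standard", "profile_min_sclk",
 * "profile_min_mclk", "profile_peak" or "profile_exit".  "auto" lets
 * the driver scale clocks dynamically, "low"/"high" pin the clocks to
 * their lowest/highest levels, "manual" enables per-clock control via
 * the pp_dpm_* files, and the profile_* levels hold fixed clocks for
 * profiling.  (Summary comment added by the editor.)
 */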
static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
							    struct device_attribute *attr,
							    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_dpm_forced_level level = 0xff;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		level = smu_get_performance_level(&adev->smu);
	else if (adev->powerplay.pp_funcs->get_performance_level)
		level = amdgpu_dpm_get_performance_level(adev);
	else
		level = adev->pm.dpm.forced_level;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
			(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
			(level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
			(level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
			"unknown");
}

static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
							    struct device_attribute *attr,
							    const char *buf,
							    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_dpm_forced_level level;
	enum amd_dpm_forced_level current_level = 0xff;
	int ret = 0;

	if (adev->in_gpu_reset)
		return -EPERM;

	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_AUTO;
	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_MANUAL;
	} else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
	} else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
	} else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
	} else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
	} else {
		return -EINVAL;
	}

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		current_level = smu_get_performance_level(&adev->smu);
	else if (adev->powerplay.pp_funcs->get_performance_level)
		current_level = amdgpu_dpm_get_performance_level(adev);

	if (current_level == level) {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return count;
	}

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

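	/* profile_exit is only valid when the current level is a profile mode */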
	if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
			       AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
			       AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
			       AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
		pr_err("Currently not in any profile mode!\n");
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return -EINVAL;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_force_performance_level(&adev->smu, level);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	} else if (adev->powerplay.pp_funcs->force_performance_level) {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.dpm.thermal_active) {
			mutex_unlock(&adev->pm.mutex);
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret) {
			mutex_unlock(&adev->pm.mutex);
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		} else {
			adev->pm.dpm.forced_level = level;
		}
		mutex_unlock(&adev->pm.mutex);
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

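/*
 * DOC: pp_num_states, pp_cur_state, pp_force_state
 *
 * pp_num_states is a read-only file listing the number of supported
 * power states and one line per state ("boot", "battery", "balanced",
 * "performance" or "default").  pp_cur_state reports the index of the
 * current state.  pp_force_state can be written with a state index to
 * force that state; writing just a newline releases the forced state.
 * (Summary comment added by the editor.)
 */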
static ssize_t amdgpu_get_pp_num_states(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	int i, buf_len, ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_get_power_num_states(&adev->smu, &data);
		if (ret) {
			/* don't leak the runtime PM reference taken above */
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return ret;
		}
	} else if (adev->powerplay.pp_funcs->get_pp_num_states) {
		amdgpu_dpm_get_pp_num_states(adev, &data);
	} else {
		memset(&data, 0, sizeof(data));
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
	for (i = 0; i < data.nums; i++)
		buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len, "%d %s\n", i,
				    (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
				    (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
				    (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
				    (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");

	return buf_len;
}

static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	struct smu_context *smu = &adev->smu;
	enum amd_pm_state_type pm = 0;
	int i = 0, ret = 0;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		pm = smu_get_current_power_state(smu);
		ret = smu_get_power_num_states(smu, &data);
		if (ret) {
			/* don't leak the runtime PM reference taken above */
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return ret;
		}
	} else if (adev->powerplay.pp_funcs->get_current_power_state
		 && adev->powerplay.pp_funcs->get_pp_num_states) {
		pm = amdgpu_dpm_get_current_power_state(adev);
		amdgpu_dpm_get_pp_num_states(adev, &data);
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	for (i = 0; i < data.nums; i++) {
		if (pm == data.states[i])
			break;
	}

	if (i == data.nums)
		i = -EINVAL;

	return snprintf(buf, PAGE_SIZE, "%d\n", i);
}

static ssize_t amdgpu_get_pp_force_state(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (adev->in_gpu_reset)
		return -EPERM;

	if (adev->pp_force_state_enabled)
		return amdgpu_get_pp_cur_state(dev, attr, buf);
	else
		return snprintf(buf, PAGE_SIZE, "\n");
}

static ssize_t amdgpu_set_pp_force_state(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state = 0;
	unsigned long idx;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	if (strlen(buf) == 1)
		adev->pp_force_state_enabled = false;
	else if (is_support_sw_smu(adev))
		adev->pp_force_state_enabled = false;
	else if (adev->powerplay.pp_funcs->dispatch_tasks &&
		 adev->powerplay.pp_funcs->get_pp_num_states) {
		struct pp_states_info data;

		ret = kstrtoul(buf, 0, &idx);
		if (ret || idx >= ARRAY_SIZE(data.states))
			return -EINVAL;

		idx = array_index_nospec(idx, ARRAY_SIZE(data.states));

		amdgpu_dpm_get_pp_num_states(adev, &data);
		state = data.states[idx];

		ret = pm_runtime_get_sync(ddev->dev);
		if (ret < 0) {
			pm_runtime_put_autosuspend(ddev->dev);
			return ret;
		}

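		/* only set user selected power states */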
		if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
		    state != POWER_STATE_TYPE_DEFAULT) {
			amdgpu_dpm_dispatch_task(adev,
					AMD_PP_TASK_ENABLE_USER_STATE, &state);
			adev->pp_force_state_enabled = true;
		}
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	}

	return count;
}
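
/*
 * DOC: pp_table
 *
 * Sysfs interface for the raw PowerPlay table used by the driver.
 * Reading returns the current table as binary data; writing replaces it
 * with a user supplied table.  Intended for debugging and advanced
 * tuning.  (Summary comment added by the editor.)
 */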
static ssize_t amdgpu_get_pp_table(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	char *table = NULL;
	int size, ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		if (size < 0)
			return size;
	} else if (adev->powerplay.pp_funcs->get_pp_table) {
		size = amdgpu_dpm_get_pp_table(adev, &table);
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		if (size < 0)
			return size;
	} else {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return 0;
	}

	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	memcpy(buf, table, size);

	return size;
}

static ssize_t amdgpu_set_pp_table(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf,
				   size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret = 0;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return ret;
		}
	} else if (adev->powerplay.pp_funcs->set_pp_table)
		amdgpu_dpm_set_pp_table(adev, buf, count);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

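/*
 * DOC: pp_od_clk_voltage
 *
 * Sysfs interface for overclocking/overdrive.  Writes start with a
 * command character: 's'/'m' to edit an sclk/mclk voltage table entry,
 * "vc" to edit a voltage curve point, 'r' to restore the default
 * tables, 'c' to commit the pending edits.  The command is followed by
 * whitespace-separated numeric parameters whose meaning depends on the
 * ASIC.  Reading returns the current overdrive tables and allowed
 * ranges.  (Summary comment added by the editor.)
 */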
static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t parameter_size = 0;
	long parameter[64];
	char buf_cpy[128];
	char *tmp_str;
	char *sub_str;
	const char delimiter[3] = {' ', '\n', '\0'};
	uint32_t type;

	if (adev->in_gpu_reset)
		return -EPERM;

	if (count > 127)
		return -EINVAL;

	if (*buf == 's')
		type = PP_OD_EDIT_SCLK_VDDC_TABLE;
	else if (*buf == 'm')
		type = PP_OD_EDIT_MCLK_VDDC_TABLE;
	else if (*buf == 'r')
		type = PP_OD_RESTORE_DEFAULT_TABLE;
	else if (*buf == 'c')
		type = PP_OD_COMMIT_DPM_TABLE;
	else if (!strncmp(buf, "vc", 2))
		type = PP_OD_EDIT_VDDC_CURVE;
	else
		return -EINVAL;

	memcpy(buf_cpy, buf, count+1);

	tmp_str = buf_cpy;

	if (type == PP_OD_EDIT_VDDC_CURVE)
		tmp_str++;
	while (isspace(*++tmp_str));

	while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
		ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
		if (ret)
			return -EINVAL;
		parameter_size++;

		while (isspace(*tmp_str))
			tmp_str++;
	}

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_od_edit_dpm_table(&adev->smu, type,
					    parameter, parameter_size);

		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	} else {
		if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
			ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
							    parameter, parameter_size);
			if (ret) {
				pm_runtime_mark_last_busy(ddev->dev);
				pm_runtime_put_autosuspend(ddev->dev);
				return -EINVAL;
			}
		}

		if (type == PP_OD_COMMIT_DPM_TABLE) {
			if (adev->powerplay.pp_funcs->dispatch_tasks) {
				amdgpu_dpm_dispatch_task(adev,
							 AMD_PP_TASK_READJUST_POWER_STATE,
							 NULL);
				pm_runtime_mark_last_busy(ddev->dev);
				pm_runtime_put_autosuspend(ddev->dev);
				return count;
			} else {
				pm_runtime_mark_last_busy(ddev->dev);
				pm_runtime_put_autosuspend(ddev->dev);
				return -EINVAL;
			}
		}
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
		size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
		size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size);
		size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size);
	} else if (adev->powerplay.pp_funcs->print_clock_levels) {
		size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
		size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
	} else {
		size = snprintf(buf, PAGE_SIZE, "\n");
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
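
/*
 * DOC: pp_features
 *
 * Sysfs interface for the PowerPlay feature mask.  Reading returns the
 * currently enabled features; writing a 64-bit hexadecimal mask
 * enables/disables the corresponding features.  (Summary comment added
 * by the editor.)
 */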
static ssize_t amdgpu_set_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint64_t featuremask;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = kstrtou64(buf, 0, &featuremask);
	if (ret)
		return -EINVAL;

	pr_debug("featuremask = 0x%llx\n", featuremask);

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	} else if (adev->powerplay.pp_funcs->set_ppfeature_status) {
		ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

static ssize_t amdgpu_get_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_sys_get_pp_feature_mask(&adev->smu, buf);
	else if (adev->powerplay.pp_funcs->get_ppfeature_status)
		size = amdgpu_dpm_get_ppfeature_status(adev, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
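
/*
 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
 *
 * Sysfs interfaces for the DPM levels of the individual clock domains.
 * Reading one of these files lists the available levels.  To manually
 * select a subset of levels, write the desired level indices (e.g.
 * "4 5 6"); power_dpm_force_performance_level typically needs to be set
 * to "manual" first.  (Summary comment added by the editor.)
 */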
static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
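
/*
 * Worst case for the mask buffer: 32 bits individually specified, in
 * octal at up to 12 characters per level plus a separator.
 */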
#define AMDGPU_MASK_BUF_MAX (32 * 13)

static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
{
	int ret;
	long level;
	char *sub_str = NULL;
	char *tmp;
	char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
	const char delimiter[3] = {' ', '\n', '\0'};
	size_t bytes;

	*mask = 0;

	bytes = min(count, sizeof(buf_cpy) - 1);
	memcpy(buf_cpy, buf, bytes);
	buf_cpy[bytes] = '\0';
	tmp = buf_cpy;
	while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
		if (strlen(sub_str)) {
			ret = kstrtol(sub_str, 0, &level);
			if (ret)
				return -EINVAL;
			*mask |= 1 << level;
		} else
			break;
	}

	return 0;
}

static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t mask = 0;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t mask = 0;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
					struct device_attribute *attr,
					const char *buf,
					size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t mask = 0;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t mask = 0;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t mask = 0;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t mask = 0;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t value = 0;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK);
	else if (adev->powerplay.pp_funcs->get_sclk_od)
		value = amdgpu_dpm_get_sclk_od(adev);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}

static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long int value;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = kstrtol(buf, 0, &value);

	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value);
	} else {
		if (adev->powerplay.pp_funcs->set_sclk_od)
			amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);

		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
		} else {
			adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
			amdgpu_pm_compute_clocks(adev);
		}
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t value = 0;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK);
	else if (adev->powerplay.pp_funcs->get_mclk_od)
		value = amdgpu_dpm_get_mclk_od(adev);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}

static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long int value;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = kstrtol(buf, 0, &value);

	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value);
	} else {
		if (adev->powerplay.pp_funcs->set_mclk_od)
			amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);

		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
		} else {
			adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
			amdgpu_pm_compute_clocks(adev);
		}
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
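
/*
 * DOC: pp_power_profile_mode
 *
 * Sysfs interface for the power profile mode heuristics.  Reading lists
 * the available profiles and their parameters.  Writing a profile index
 * selects it; for the CUSTOM profile, the index is followed by the
 * profile parameters.  (Summary comment added by the editor.)
 */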
static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_get_power_profile_mode(&adev->smu, buf);
	else if (adev->powerplay.pp_funcs->get_power_profile_mode)
		size = amdgpu_dpm_get_power_profile_mode(adev, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t count)
{
	int ret;
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t parameter_size = 0;
	long parameter[64];
	char *sub_str, buf_cpy[128];
	char *tmp_str;
	uint32_t i = 0;
	char tmp[2];
	long int profile_mode = 0;
	const char delimiter[3] = {' ', '\n', '\0'};

	if (adev->in_gpu_reset)
		return -EPERM;

	tmp[0] = *(buf);
	tmp[1] = '\0';
	ret = kstrtol(tmp, 0, &profile_mode);
	if (ret)
		return -EINVAL;

	if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
		if (count < 2 || count > 127)
			return -EINVAL;
		while (isspace(*++buf))
			i++;
		memcpy(buf_cpy, buf, count-i);
		tmp_str = buf_cpy;
		while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
			ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
			if (ret)
				return -EINVAL;
			parameter_size++;
			while (isspace(*tmp_str))
				tmp_str++;
		}
	}
	parameter[parameter_size] = profile_mode;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
	else if (adev->powerplay.pp_funcs->set_power_profile_mode)
		ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (!ret)
		return count;

	return -EINVAL;
}
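
/*
 * DOC: gpu_busy_percent
 *
 * Read-only file reporting how busy the GPU is as a percentage
 * (0 = idle, 100 = fully busy).
 */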
static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int r, value, size = sizeof(value);

	if (adev->in_gpu_reset)
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return r;
	}

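	/* read the IP busy sensor */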
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
				   (void *)&value, &size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
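
/*
 * DOC: mem_busy_percent
 *
 * Read-only file reporting how busy the VRAM is as a percentage
 * (0 = idle, 100 = fully busy).
 */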
static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int r, value, size = sizeof(value);

	if (adev->in_gpu_reset)
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return r;
	}

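	/* read the IP busy sensor */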
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
				   (void *)&value, &size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
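
/*
 * DOC: pcie_bw
 *
 * Read-only file estimating PCIe bandwidth usage.  The output is three
 * values: two message counters reported by the ASIC's PCIe performance
 * counters and the maximum payload size (mps) of a PCIe packet.  Not
 * available on APUs.  (Summary comment added by the editor.)
 */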
static ssize_t amdgpu_get_pcie_bw(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint64_t count0 = 0, count1 = 0;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	if (adev->flags & AMD_IS_APU)
		return -ENODATA;

	if (!adev->asic_funcs->get_pcie_usage)
		return -ENODATA;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	amdgpu_asic_get_pcie_usage(adev, &count0, &count1);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n",
			count0, count1, pcie_get_mps(adev->pdev));
}
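
/*
 * DOC: unique_id
 *
 * Read-only file reporting a unique per-GPU identifier as a 16-digit
 * hexadecimal number, on ASICs that expose one.
 */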
static ssize_t amdgpu_get_unique_id(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (adev->in_gpu_reset)
		return -EPERM;

	if (adev->unique_id)
		return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);

	return 0;
}
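
/*
 * DOC: thermal_throttling_logging
 *
 * Thermal throttling events can be logged to the kernel log.  Reading
 * this file shows whether logging is enabled and the minimum interval
 * (in seconds) between log messages.  Writing a value from 1 to 3600
 * sets the interval and enables logging; writing 0 or a negative value
 * disables it; values above 3600 are rejected.  (Summary comment added
 * by the editor.)
 */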
static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
						     struct device_attribute *attr,
						     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	return snprintf(buf, PAGE_SIZE, "%s: thermal throttling logging %s, with interval %d seconds\n",
			adev->ddev->unique,
			atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
			adev->throttling_logging_rs.interval / HZ + 1);
}

static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
						     struct device_attribute *attr,
						     const char *buf,
						     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	long throttling_logging_interval;
	unsigned long flags;
	int ret = 0;

	ret = kstrtol(buf, 0, &throttling_logging_interval);
	if (ret)
		return ret;

	if (throttling_logging_interval > 3600)
		return -EINVAL;

	if (throttling_logging_interval > 0) {
		raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
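		/*
		 * Reset the ratelimit state internals.
		 * This effectively restarts the interval timer.
		 */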
		adev->throttling_logging_rs.interval =
			(throttling_logging_interval - 1) * HZ;
		adev->throttling_logging_rs.begin = 0;
		adev->throttling_logging_rs.printed = 0;
		adev->throttling_logging_rs.missed = 0;
		raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);

		atomic_set(&adev->throttling_logging_enabled, 1);
	} else {
		atomic_set(&adev->throttling_logging_enabled, 0);
	}

	return count;
}

static struct amdgpu_device_attr amdgpu_device_attrs[] = {
	AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC),
};

static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
			       uint32_t mask, enum amdgpu_device_attr_states *states)
{
	struct device_attribute *dev_attr = &attr->dev_attr;
	const char *attr_name = dev_attr->attr.name;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	enum amd_asic_type asic_type = adev->asic_type;

	if (!(attr->flags & mask)) {
		*states = ATTR_STATE_UNSUPPORTED;
		return 0;
	}

#define DEVICE_ATTR_IS(_name) (!strcmp(attr_name, #_name))

	if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
		if (asic_type < CHIP_VEGA10)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
		if (asic_type < CHIP_VEGA10 || asic_type == CHIP_ARCTURUS)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
		if (asic_type < CHIP_VEGA20)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
		if (asic_type == CHIP_ARCTURUS)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
		*states = ATTR_STATE_UNSUPPORTED;
		if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
		    (!is_support_sw_smu(adev) && hwmgr->od_enabled))
			*states = ATTR_STATE_SUPPORTED;
	} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
		if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pcie_bw)) {
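		/* PCIe Perf counters won't work on APU nodes */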
		if (adev->flags & AMD_IS_APU)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(unique_id)) {
		if (asic_type != CHIP_VEGA10 &&
		    asic_type != CHIP_VEGA20 &&
		    asic_type != CHIP_ARCTURUS)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_features)) {
		if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
			*states = ATTR_STATE_UNSUPPORTED;
	}

	if (asic_type == CHIP_ARCTURUS) {
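		/* Arcturus does not support standalone mclk/socclk/fclk level setting */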
		if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
		    DEVICE_ATTR_IS(pp_dpm_socclk) ||
		    DEVICE_ATTR_IS(pp_dpm_fclk)) {
			dev_attr->attr.mode &= ~S_IWUGO;
			dev_attr->store = NULL;
		}
	}

#undef DEVICE_ATTR_IS

	return 0;
}

static int amdgpu_device_attr_create(struct amdgpu_device *adev,
				     struct amdgpu_device_attr *attr,
				     uint32_t mask, struct list_head *attr_list)
{
	int ret = 0;
	struct device_attribute *dev_attr = &attr->dev_attr;
	const char *name = dev_attr->attr.name;
	enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
	struct amdgpu_device_attr_entry *attr_entry;

	int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
			   uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;

	BUG_ON(!attr);

	/* prefer the attribute's own update callback when it provides one */
	attr_update = attr->attr_update ? attr->attr_update : default_attr_update;

	ret = attr_update(adev, attr, mask, &attr_states);
	if (ret) {
		dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
			name, ret);
		return ret;
	}

	if (attr_states == ATTR_STATE_UNSUPPORTED)
		return 0;

	ret = device_create_file(adev->dev, dev_attr);
	if (ret) {
		dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
			name, ret);
	}

	attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
	if (!attr_entry)
		return -ENOMEM;

	attr_entry->attr = attr;
	INIT_LIST_HEAD(&attr_entry->entry);

	list_add_tail(&attr_entry->entry, attr_list);

	return ret;
}

static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
{
	struct device_attribute *dev_attr = &attr->dev_attr;

	device_remove_file(adev->dev, dev_attr);
}

static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
					     struct list_head *attr_list);

static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
					    struct amdgpu_device_attr *attrs,
					    uint32_t counts,
					    uint32_t mask,
					    struct list_head *attr_list)
{
	int ret = 0;
	uint32_t i = 0;

	for (i = 0; i < counts; i++) {
		ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
		if (ret)
			goto failed;
	}

	return 0;

failed:
	amdgpu_device_attr_remove_groups(adev, attr_list);

	return ret;
}

static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
					     struct list_head *attr_list)
{
	struct amdgpu_device_attr_entry *entry, *entry_tmp;

	if (list_empty(attr_list))
		return;

	list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
		amdgpu_device_attr_remove(adev, entry->attr);
		list_del(&entry->entry);
		kfree(entry);
	}
}

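/*
 * DOC: hwmon
 *
 * The handlers below implement the standard hwmon interfaces for
 * temperature (edge/junction/mem channels with their thresholds and
 * labels) and fan control (pwm1*).  (Summary comment added by the
 * editor, covering only the handlers visible in this section.)
 */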
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int channel = to_sensor_dev_attr(attr)->index;
	int r, temp = 0, size = sizeof(temp);

	if (adev->in_gpu_reset)
		return -EPERM;

	if (channel >= PP_TEMP_MAX)
		return -EINVAL;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	switch (channel) {
	case PP_TEMP_JUNCTION:
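		/* get current junction (hotspot) temperature */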
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
					   (void *)&temp, &size);
		break;
	case PP_TEMP_EDGE:
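		/* get current edge temperature */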
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
					   (void *)&temp, &size);
		break;
	case PP_TEMP_MEM:
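		/* get current memory temperature */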
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
					   (void *)&temp, &size);
		break;
	default:
		r = -EINVAL;
		break;
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_temp;
	else
		temp = adev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
						     struct device_attribute *attr,
						     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_hotspot_temp;
	else
		temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
						 struct device_attribute *attr,
						 char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_mem_temp;
	else
		temp = adev->pm.dpm.thermal.max_mem_crit_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	int channel = to_sensor_dev_attr(attr)->index;

	if (channel >= PP_TEMP_MAX)
		return -EINVAL;

	return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label);
}

static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int channel = to_sensor_dev_attr(attr)->index;
	int temp = 0;

	if (channel >= PP_TEMP_MAX)
		return -EINVAL;

	switch (channel) {
	case PP_TEMP_JUNCTION:
		temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
		break;
	case PP_TEMP_EDGE:
		temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
		break;
	case PP_TEMP_MEM:
		temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
		break;
	}

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
2261
2262static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
2263 struct device_attribute *attr,
2264 char *buf)
2265{
2266 struct amdgpu_device *adev = dev_get_drvdata(dev);
2267 u32 pwm_mode = 0;
2268 int ret;
2269
2270 if (adev->in_gpu_reset)
2271 return -EPERM;
2272
2273 ret = pm_runtime_get_sync(adev->ddev->dev);
2274 if (ret < 0) {
2275 pm_runtime_put_autosuspend(adev->ddev->dev);
2276 return ret;
2277 }
2278
2279 if (is_support_sw_smu(adev)) {
2280 pwm_mode = smu_get_fan_control_mode(&adev->smu);
2281 } else {
2282 if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
2283 pm_runtime_mark_last_busy(adev->ddev->dev);
2284 pm_runtime_put_autosuspend(adev->ddev->dev);
2285 return -EINVAL;
2286 }
2287
2288 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2289 }
2290
2291 pm_runtime_mark_last_busy(adev->ddev->dev);
2292 pm_runtime_put_autosuspend(adev->ddev->dev);
2293
2294 return sprintf(buf, "%i\n", pwm_mode);
2295}
2296
2297static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
2298 struct device_attribute *attr,
2299 const char *buf,
2300 size_t count)
2301{
2302 struct amdgpu_device *adev = dev_get_drvdata(dev);
2303 int err, ret;
2304 int value;
2305
2306 if (adev->in_gpu_reset)
2307 return -EPERM;
2308
2309 err = kstrtoint(buf, 10, &value);
2310 if (err)
2311 return err;
2312
2313 ret = pm_runtime_get_sync(adev->ddev->dev);
2314 if (ret < 0) {
2315 pm_runtime_put_autosuspend(adev->ddev->dev);
2316 return ret;
2317 }
2318
2319 if (is_support_sw_smu(adev)) {
2320 smu_set_fan_control_mode(&adev->smu, value);
2321 } else {
2322 if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
2323 pm_runtime_mark_last_busy(adev->ddev->dev);
2324 pm_runtime_put_autosuspend(adev->ddev->dev);
2325 return -EINVAL;
2326 }
2327
2328 amdgpu_dpm_set_fan_control_mode(adev, value);
2329 }
2330
2331 pm_runtime_mark_last_busy(adev->ddev->dev);
2332 pm_runtime_put_autosuspend(adev->ddev->dev);
2333
2334 return count;
2335}
2336
2337static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
2338 struct device_attribute *attr,
2339 char *buf)
2340{
2341 return sprintf(buf, "%i\n", 0);
2342}
2343
2344static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
2345 struct device_attribute *attr,
2346 char *buf)
2347{
2348 return sprintf(buf, "%i\n", 255);
2349}
2350
2351static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
2352 struct device_attribute *attr,
2353 const char *buf, size_t count)
2354{
2355 struct amdgpu_device *adev = dev_get_drvdata(dev);
2356 int err;
2357 u32 value;
2358 u32 pwm_mode;
2359
2360 if (adev->in_gpu_reset)
2361 return -EPERM;
2362
2363 err = pm_runtime_get_sync(adev->ddev->dev);
2364 if (err < 0) {
2365 pm_runtime_put_autosuspend(adev->ddev->dev);
2366 return err;
2367 }
2368
2369 if (is_support_sw_smu(adev))
2370 pwm_mode = smu_get_fan_control_mode(&adev->smu);
2371 else
2372 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2373
2374 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2375 pr_info("manual fan speed control should be enabled first\n");
2376 pm_runtime_mark_last_busy(adev->ddev->dev);
2377 pm_runtime_put_autosuspend(adev->ddev->dev);
2378 return -EINVAL;
2379 }
2380
2381 err = kstrtou32(buf, 10, &value);
2382 if (err) {
2383 pm_runtime_mark_last_busy(adev->ddev->dev);
2384 pm_runtime_put_autosuspend(adev->ddev->dev);
2385 return err;
2386 }
2387
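	/* convert the sysfs pwm value (0-255) to a percentage */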
2388 value = (value * 100) / 255;
2389
2390 if (is_support_sw_smu(adev))
2391 err = smu_set_fan_speed_percent(&adev->smu, value);
2392 else if (adev->powerplay.pp_funcs->set_fan_speed_percent)
2393 err = amdgpu_dpm_set_fan_speed_percent(adev, value);
2394 else
2395 err = -EINVAL;
2396
2397 pm_runtime_mark_last_busy(adev->ddev->dev);
2398 pm_runtime_put_autosuspend(adev->ddev->dev);
2399
2400 if (err)
2401 return err;
2402
2403 return count;
2404}
2405
2406static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
2407 struct device_attribute *attr,
2408 char *buf)
2409{
2410 struct amdgpu_device *adev = dev_get_drvdata(dev);
2411 int err;
2412 u32 speed = 0;
2413
2414 if (adev->in_gpu_reset)
2415 return -EPERM;
2416
2417 err = pm_runtime_get_sync(adev->ddev->dev);
2418 if (err < 0) {
2419 pm_runtime_put_autosuspend(adev->ddev->dev);
2420 return err;
2421 }
2422
2423 if (is_support_sw_smu(adev))
2424 err = smu_get_fan_speed_percent(&adev->smu, &speed);
2425 else if (adev->powerplay.pp_funcs->get_fan_speed_percent)
2426 err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
2427 else
2428 err = -EINVAL;
2429
2430 pm_runtime_mark_last_busy(adev->ddev->dev);
2431 pm_runtime_put_autosuspend(adev->ddev->dev);
2432
2433 if (err)
2434 return err;
2435
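	/* convert the percentage back to the 0-255 pwm range sysfs expects */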
2436 speed = (speed * 255) / 100;
2437
2438 return sprintf(buf, "%i\n", speed);
2439}
2440
2441static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
2442 struct device_attribute *attr,
2443 char *buf)
2444{
2445 struct amdgpu_device *adev = dev_get_drvdata(dev);
2446 int err;
2447 u32 speed = 0;
2448
2449 if (adev->in_gpu_reset)
2450 return -EPERM;
2451
2452 err = pm_runtime_get_sync(adev->ddev->dev);
2453 if (err < 0) {
2454 pm_runtime_put_autosuspend(adev->ddev->dev);
2455 return err;
2456 }
2457
2458 if (is_support_sw_smu(adev))
2459 err = smu_get_fan_speed_rpm(&adev->smu, &speed);
2460 else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
2461 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
2462 else
2463 err = -EINVAL;
2464
2465 pm_runtime_mark_last_busy(adev->ddev->dev);
2466 pm_runtime_put_autosuspend(adev->ddev->dev);
2467
2468 if (err)
2469 return err;
2470
2471 return sprintf(buf, "%i\n", speed);
2472}
2473
2474static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
2475 struct device_attribute *attr,
2476 char *buf)
2477{
2478 struct amdgpu_device *adev = dev_get_drvdata(dev);
2479 u32 min_rpm = 0;
2480 u32 size = sizeof(min_rpm);
2481 int r;
2482
2483 if (adev->in_gpu_reset)
2484 return -EPERM;
2485
2486 r = pm_runtime_get_sync(adev->ddev->dev);
2487 if (r < 0) {
2488 pm_runtime_put_autosuspend(adev->ddev->dev);
2489 return r;
2490 }
2491
2492 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
2493 (void *)&min_rpm, &size);
2494
2495 pm_runtime_mark_last_busy(adev->ddev->dev);
2496 pm_runtime_put_autosuspend(adev->ddev->dev);
2497
2498 if (r)
2499 return r;
2500
2501 return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm);
2502}
2503
2504static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
2505 struct device_attribute *attr,
2506 char *buf)
2507{
2508 struct amdgpu_device *adev = dev_get_drvdata(dev);
2509 u32 max_rpm = 0;
2510 u32 size = sizeof(max_rpm);
2511 int r;
2512
2513 if (adev->in_gpu_reset)
2514 return -EPERM;
2515
2516 r = pm_runtime_get_sync(adev->ddev->dev);
2517 if (r < 0) {
2518 pm_runtime_put_autosuspend(adev->ddev->dev);
2519 return r;
2520 }
2521
2522 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
2523 (void *)&max_rpm, &size);
2524
2525 pm_runtime_mark_last_busy(adev->ddev->dev);
2526 pm_runtime_put_autosuspend(adev->ddev->dev);
2527
2528 if (r)
2529 return r;
2530
2531 return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm);
2532}
2533
2534static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
2535 struct device_attribute *attr,
2536 char *buf)
2537{
2538 struct amdgpu_device *adev = dev_get_drvdata(dev);
2539 int err;
2540 u32 rpm = 0;
2541
2542 if (adev->in_gpu_reset)
2543 return -EPERM;
2544
2545 err = pm_runtime_get_sync(adev->ddev->dev);
2546 if (err < 0) {
2547 pm_runtime_put_autosuspend(adev->ddev->dev);
2548 return err;
2549 }
2550
2551 if (is_support_sw_smu(adev))
2552 err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
2553 else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
2554 err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
2555 else
2556 err = -EINVAL;
2557
2558 pm_runtime_mark_last_busy(adev->ddev->dev);
2559 pm_runtime_put_autosuspend(adev->ddev->dev);
2560
2561 if (err)
2562 return err;
2563
2564 return sprintf(buf, "%i\n", rpm);
2565}
2566
2567static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
2568 struct device_attribute *attr,
2569 const char *buf, size_t count)
2570{
2571 struct amdgpu_device *adev = dev_get_drvdata(dev);
2572 int err;
2573 u32 value;
2574 u32 pwm_mode;
2575
2576 if (adev->in_gpu_reset)
2577 return -EPERM;
2578
2579 err = pm_runtime_get_sync(adev->ddev->dev);
2580 if (err < 0) {
2581 pm_runtime_put_autosuspend(adev->ddev->dev);
2582 return err;
2583 }
2584
2585 if (is_support_sw_smu(adev))
2586 pwm_mode = smu_get_fan_control_mode(&adev->smu);
2587 else
2588 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2589
2590 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2591 pm_runtime_mark_last_busy(adev->ddev->dev);
2592 pm_runtime_put_autosuspend(adev->ddev->dev);
2593 return -ENODATA;
2594 }
2595
2596 err = kstrtou32(buf, 10, &value);
2597 if (err) {
2598 pm_runtime_mark_last_busy(adev->ddev->dev);
2599 pm_runtime_put_autosuspend(adev->ddev->dev);
2600 return err;
2601 }
2602
2603 if (is_support_sw_smu(adev))
2604 err = smu_set_fan_speed_rpm(&adev->smu, value);
2605 else if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
2606 err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
2607 else
2608 err = -EINVAL;
2609
2610 pm_runtime_mark_last_busy(adev->ddev->dev);
2611 pm_runtime_put_autosuspend(adev->ddev->dev);
2612
2613 if (err)
2614 return err;
2615
2616 return count;
2617}
2618
2619static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
2620 struct device_attribute *attr,
2621 char *buf)
2622{
2623 struct amdgpu_device *adev = dev_get_drvdata(dev);
2624 u32 pwm_mode = 0;
2625 int ret;
2626
2627 if (adev->in_gpu_reset)
2628 return -EPERM;
2629
2630 ret = pm_runtime_get_sync(adev->ddev->dev);
2631 if (ret < 0) {
2632 pm_runtime_put_autosuspend(adev->ddev->dev);
2633 return ret;
2634 }
2635
2636 if (is_support_sw_smu(adev)) {
2637 pwm_mode = smu_get_fan_control_mode(&adev->smu);
2638 } else {
2639 if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
2640 pm_runtime_mark_last_busy(adev->ddev->dev);
2641 pm_runtime_put_autosuspend(adev->ddev->dev);
2642 return -EINVAL;
2643 }
2644
2645 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2646 }
2647
2648 pm_runtime_mark_last_busy(adev->ddev->dev);
2649 pm_runtime_put_autosuspend(adev->ddev->dev);
2650
2651 return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
2652}
2653
2654static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
2655 struct device_attribute *attr,
2656 const char *buf,
2657 size_t count)
2658{
2659 struct amdgpu_device *adev = dev_get_drvdata(dev);
2660 int err;
2661 int value;
2662 u32 pwm_mode;
2663
2664 if (adev->in_gpu_reset)
2665 return -EPERM;
2666
2667 err = kstrtoint(buf, 10, &value);
2668 if (err)
2669 return err;
2670
2671 if (value == 0)
2672 pwm_mode = AMD_FAN_CTRL_AUTO;
2673 else if (value == 1)
2674 pwm_mode = AMD_FAN_CTRL_MANUAL;
2675 else
2676 return -EINVAL;
2677
2678 err = pm_runtime_get_sync(adev->ddev->dev);
2679 if (err < 0) {
2680 pm_runtime_put_autosuspend(adev->ddev->dev);
2681 return err;
2682 }
2683
2684 if (is_support_sw_smu(adev)) {
2685 smu_set_fan_control_mode(&adev->smu, pwm_mode);
2686 } else {
2687 if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
2688 pm_runtime_mark_last_busy(adev->ddev->dev);
2689 pm_runtime_put_autosuspend(adev->ddev->dev);
2690 return -EINVAL;
2691 }
2692 amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
2693 }
2694
2695 pm_runtime_mark_last_busy(adev->ddev->dev);
2696 pm_runtime_put_autosuspend(adev->ddev->dev);
2697
2698 return count;
2699}
2700
2701static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
2702 struct device_attribute *attr,
2703 char *buf)
2704{
2705 struct amdgpu_device *adev = dev_get_drvdata(dev);
2706 u32 vddgfx;
2707 int r, size = sizeof(vddgfx);
2708
2709 if (adev->in_gpu_reset)
2710 return -EPERM;
2711
2712 r = pm_runtime_get_sync(adev->ddev->dev);
2713 if (r < 0) {
2714 pm_runtime_put_autosuspend(adev->ddev->dev);
2715 return r;
2716 }
2717
2718
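	/* get the voltage */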
2719 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
2720 (void *)&vddgfx, &size);
2721
2722 pm_runtime_mark_last_busy(adev->ddev->dev);
2723 pm_runtime_put_autosuspend(adev->ddev->dev);
2724
2725 if (r)
2726 return r;
2727
2728 return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx);
2729}
2730
2731static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
2732 struct device_attribute *attr,
2733 char *buf)
2734{
2735 return snprintf(buf, PAGE_SIZE, "vddgfx\n");
2736}
2737
2738static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
2739 struct device_attribute *attr,
2740 char *buf)
2741{
2742 struct amdgpu_device *adev = dev_get_drvdata(dev);
2743 u32 vddnb;
2744 int r, size = sizeof(vddnb);
2745
2746 if (adev->in_gpu_reset)
2747 return -EPERM;
2748
2749
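	/* only APUs have vddnb */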
2750 if (!(adev->flags & AMD_IS_APU))
2751 return -EINVAL;
2752
2753 r = pm_runtime_get_sync(adev->ddev->dev);
2754 if (r < 0) {
2755 pm_runtime_put_autosuspend(adev->ddev->dev);
2756 return r;
2757 }
2758
2759
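	/* get the voltage */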
2760 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
2761 (void *)&vddnb, &size);
2762
2763 pm_runtime_mark_last_busy(adev->ddev->dev);
2764 pm_runtime_put_autosuspend(adev->ddev->dev);
2765
2766 if (r)
2767 return r;
2768
2769 return snprintf(buf, PAGE_SIZE, "%d\n", vddnb);
2770}
2771
2772static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
2773 struct device_attribute *attr,
2774 char *buf)
2775{
2776 return snprintf(buf, PAGE_SIZE, "vddnb\n");
2777}
2778
2779static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
2780 struct device_attribute *attr,
2781 char *buf)
2782{
2783 struct amdgpu_device *adev = dev_get_drvdata(dev);
2784 u32 query = 0;
2785 int r, size = sizeof(u32);
	unsigned int uw;
2787
2788 if (adev->in_gpu_reset)
2789 return -EPERM;
2790
2791 r = pm_runtime_get_sync(adev->ddev->dev);
2792 if (r < 0) {
2793 pm_runtime_put_autosuspend(adev->ddev->dev);
2794 return r;
2795 }
2796
2797
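	/* read the average GPU power draw */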
2798 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
2799 (void *)&query, &size);
2800
2801 pm_runtime_mark_last_busy(adev->ddev->dev);
2802 pm_runtime_put_autosuspend(adev->ddev->dev);
2803
2804 if (r)
2805 return r;
2806
2807
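	/* convert to microwatts */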
2808 uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
2809
2810 return snprintf(buf, PAGE_SIZE, "%u\n", uw);
2811}
2812
2813static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
2814 struct device_attribute *attr,
2815 char *buf)
2816{
2817 return sprintf(buf, "%i\n", 0);
2818}
2819
2820static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
2821 struct device_attribute *attr,
2822 char *buf)
2823{
2824 struct amdgpu_device *adev = dev_get_drvdata(dev);
2825 uint32_t limit = 0;
2826 ssize_t size;
2827 int r;
2828
2829 if (adev->in_gpu_reset)
2830 return -EPERM;
2831
2832 r = pm_runtime_get_sync(adev->ddev->dev);
2833 if (r < 0) {
2834 pm_runtime_put_autosuspend(adev->ddev->dev);
2835 return r;
2836 }
2837
2838 if (is_support_sw_smu(adev)) {
2839 smu_get_power_limit(&adev->smu, &limit, true);
2840 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2841 } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
2842 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
2843 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2844 } else {
2845 size = snprintf(buf, PAGE_SIZE, "\n");
2846 }
2847
2848 pm_runtime_mark_last_busy(adev->ddev->dev);
2849 pm_runtime_put_autosuspend(adev->ddev->dev);
2850
2851 return size;
2852}
2853
2854static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
2855 struct device_attribute *attr,
2856 char *buf)
2857{
2858 struct amdgpu_device *adev = dev_get_drvdata(dev);
2859 uint32_t limit = 0;
2860 ssize_t size;
2861 int r;
2862
2863 if (adev->in_gpu_reset)
2864 return -EPERM;
2865
2866 r = pm_runtime_get_sync(adev->ddev->dev);
2867 if (r < 0) {
2868 pm_runtime_put_autosuspend(adev->ddev->dev);
2869 return r;
2870 }
2871
2872 if (is_support_sw_smu(adev)) {
2873 smu_get_power_limit(&adev->smu, &limit, false);
2874 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2875 } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
2876 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
2877 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2878 } else {
2879 size = snprintf(buf, PAGE_SIZE, "\n");
2880 }
2881
2882 pm_runtime_mark_last_busy(adev->ddev->dev);
2883 pm_runtime_put_autosuspend(adev->ddev->dev);
2884
2885 return size;
2886}
2887
2888
2889static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
2890 struct device_attribute *attr,
2891 const char *buf,
2892 size_t count)
2893{
2894 struct amdgpu_device *adev = dev_get_drvdata(dev);
2895 int err;
2896 u32 value;
2897
2898 if (adev->in_gpu_reset)
2899 return -EPERM;
2900
2901 if (amdgpu_sriov_vf(adev))
2902 return -EINVAL;
2903
2904 err = kstrtou32(buf, 10, &value);
2905 if (err)
2906 return err;
2907
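	/* convert from microwatts to watts, e.g. writing 150000000 requests a 150 W cap */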
2908 value = value / 1000000;
2909
2910
2911 err = pm_runtime_get_sync(adev->ddev->dev);
2912 if (err < 0) {
2913 pm_runtime_put_autosuspend(adev->ddev->dev);
2914 return err;
2915 }
2916
2917 if (is_support_sw_smu(adev))
2918 err = smu_set_power_limit(&adev->smu, value);
2919 else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit)
2920 err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
2921 else
2922 err = -EINVAL;
2923
2924 pm_runtime_mark_last_busy(adev->ddev->dev);
2925 pm_runtime_put_autosuspend(adev->ddev->dev);
2926
2927 if (err)
2928 return err;
2929
2930 return count;
2931}
2932
2933static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
2934 struct device_attribute *attr,
2935 char *buf)
2936{
2937 struct amdgpu_device *adev = dev_get_drvdata(dev);
2938 uint32_t sclk;
2939 int r, size = sizeof(sclk);
2940
2941 if (adev->in_gpu_reset)
2942 return -EPERM;
2943
2944 r = pm_runtime_get_sync(adev->ddev->dev);
2945 if (r < 0) {
2946 pm_runtime_put_autosuspend(adev->ddev->dev);
2947 return r;
2948 }
2949
2950
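	/* get the sclk */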
2951 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
2952 (void *)&sclk, &size);
2953
2954 pm_runtime_mark_last_busy(adev->ddev->dev);
2955 pm_runtime_put_autosuspend(adev->ddev->dev);
2956
2957 if (r)
2958 return r;
2959
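	/* the sensor reports the clock in 10 kHz units; hwmon expects Hz */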
2960 return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000);
2961}
2962
2963static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
2964 struct device_attribute *attr,
2965 char *buf)
2966{
2967 return snprintf(buf, PAGE_SIZE, "sclk\n");
2968}
2969
2970static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
2971 struct device_attribute *attr,
2972 char *buf)
2973{
2974 struct amdgpu_device *adev = dev_get_drvdata(dev);
2975 uint32_t mclk;
2976 int r, size = sizeof(mclk);
2977
2978 if (adev->in_gpu_reset)
2979 return -EPERM;
2980
2981 r = pm_runtime_get_sync(adev->ddev->dev);
2982 if (r < 0) {
2983 pm_runtime_put_autosuspend(adev->ddev->dev);
2984 return r;
2985 }
2986
2987
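	/* get the mclk */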
2988 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
2989 (void *)&mclk, &size);
2990
2991 pm_runtime_mark_last_busy(adev->ddev->dev);
2992 pm_runtime_put_autosuspend(adev->ddev->dev);
2993
2994 if (r)
2995 return r;
2996
2997 return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000);
2998}
2999
3000static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
3001 struct device_attribute *attr,
3002 char *buf)
3003{
3004 return snprintf(buf, PAGE_SIZE, "mclk\n");
3005}
3006
/**
 * DOC: hwmon
 *
 * The amdgpu driver exposes the following sensor interfaces:
 *
 * - GPU temperature (via the on-die sensor)
 *
 * - GPU voltage
 *
 * - Northbridge voltage (APUs only)
 *
 * - GPU power
 *
 * - GPU fan
 *
 * - GPU gfx/compute engine clock
 *
 * - GPU memory clock (dGPU only)
 *
 * hwmon interfaces for GPU temperature:
 *
 * - temp[1-3]_input: the on die GPU temperature in millidegrees Celsius
 *   - temp2_input and temp3_input are supported on SOC15 dGPUs only
 *
 * - temp[1-3]_label: temperature channel label
 *   - temp2_label and temp3_label are supported on SOC15 dGPUs only
 *
 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
 *   - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
 *
 * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
 *   - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
 *
 * - temp[1-3]_emergency: temperature emergency max value (asic shutdown) in millidegrees Celsius
 *   - supported on SOC15 dGPUs only
 *
 * hwmon interfaces for GPU voltage:
 *
 * - in0_input: the voltage on the GPU in millivolts
 *
 * - in1_input: the voltage on the Northbridge in millivolts
 *
 * hwmon interfaces for GPU power:
 *
 * - power1_average: average power used by the GPU in microwatts
 *
 * - power1_cap_min: minimum cap supported in microwatts
 *
 * - power1_cap_max: maximum cap supported in microwatts
 *
 * - power1_cap: selected power cap in microwatts
 *
 * hwmon interfaces for GPU fan:
 *
 * - pwm1: pulse width modulation fan level (0-255)
 *
 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
 *
 * - pwm1_min: pulse width modulation fan control minimum level (0)
 *
 * - pwm1_max: pulse width modulation fan control maximum level (255)
 *
 * - fan1_min: minimum fan speed in RPM
 *
 * - fan1_max: maximum fan speed in RPM
 *
 * - fan1_input: current fan speed in RPM
 *
 * - fan1_target: desired fan speed in RPM
 *
 * - fan1_enable: enable (1) or disable (0) the fan sensor
 *
 * hwmon interfaces for GPU clocks:
 *
 * - freq1_input: the gfx/compute clock in hertz
 *
 * - freq2_input: the memory clock in hertz
 *
 * You can use hwmon tools like sensors to view this information on your
 * system, or read the files directly, e.g. (assuming hwmon1 maps to this
 * device): cat /sys/class/hwmon/hwmon1/temp1_input
 */

3089static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
3090static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
3091static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
3092static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
3093static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
3094static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
3095static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
3096static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
3097static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
3098static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
3099static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
3100static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
3101static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
3102static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
3103static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
3104static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
3105static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
3106static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
3107static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
3108static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
3109static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
3110static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
3111static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
3112static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
3113static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
3114static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
3115static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
3116static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
3117static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
3118static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
3119static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
3120static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
3121static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
3122static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
3123static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
3124static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
3125
3126static struct attribute *hwmon_attributes[] = {
3127 &sensor_dev_attr_temp1_input.dev_attr.attr,
3128 &sensor_dev_attr_temp1_crit.dev_attr.attr,
3129 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
3130 &sensor_dev_attr_temp2_input.dev_attr.attr,
3131 &sensor_dev_attr_temp2_crit.dev_attr.attr,
3132 &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
3133 &sensor_dev_attr_temp3_input.dev_attr.attr,
3134 &sensor_dev_attr_temp3_crit.dev_attr.attr,
3135 &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
3136 &sensor_dev_attr_temp1_emergency.dev_attr.attr,
3137 &sensor_dev_attr_temp2_emergency.dev_attr.attr,
3138 &sensor_dev_attr_temp3_emergency.dev_attr.attr,
3139 &sensor_dev_attr_temp1_label.dev_attr.attr,
3140 &sensor_dev_attr_temp2_label.dev_attr.attr,
3141 &sensor_dev_attr_temp3_label.dev_attr.attr,
3142 &sensor_dev_attr_pwm1.dev_attr.attr,
3143 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
3144 &sensor_dev_attr_pwm1_min.dev_attr.attr,
3145 &sensor_dev_attr_pwm1_max.dev_attr.attr,
3146 &sensor_dev_attr_fan1_input.dev_attr.attr,
3147 &sensor_dev_attr_fan1_min.dev_attr.attr,
3148 &sensor_dev_attr_fan1_max.dev_attr.attr,
3149 &sensor_dev_attr_fan1_target.dev_attr.attr,
3150 &sensor_dev_attr_fan1_enable.dev_attr.attr,
3151 &sensor_dev_attr_in0_input.dev_attr.attr,
3152 &sensor_dev_attr_in0_label.dev_attr.attr,
3153 &sensor_dev_attr_in1_input.dev_attr.attr,
3154 &sensor_dev_attr_in1_label.dev_attr.attr,
3155 &sensor_dev_attr_power1_average.dev_attr.attr,
3156 &sensor_dev_attr_power1_cap_max.dev_attr.attr,
3157 &sensor_dev_attr_power1_cap_min.dev_attr.attr,
3158 &sensor_dev_attr_power1_cap.dev_attr.attr,
3159 &sensor_dev_attr_freq1_input.dev_attr.attr,
3160 &sensor_dev_attr_freq1_label.dev_attr.attr,
3161 &sensor_dev_attr_freq2_input.dev_attr.attr,
3162 &sensor_dev_attr_freq2_label.dev_attr.attr,
3163 NULL
3164};
3165
3166static umode_t hwmon_attributes_visible(struct kobject *kobj,
3167 struct attribute *attr, int index)
3168{
3169 struct device *dev = kobj_to_dev(kobj);
3170 struct amdgpu_device *adev = dev_get_drvdata(dev);
3171 umode_t effective_mode = attr->mode;
3172
3173
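	/* under multi-vf mode, the hwmon attributes are all not supported */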
3174 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
3175 return 0;
3176
3177
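	/* fan control attributes are not supported under pp one vf mode */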
3178 if (amdgpu_sriov_is_pp_one_vf(adev) &&
3179 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3180 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3181 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3182 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3183 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3184 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3185 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3186 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3187 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3188 return 0;
3189
3190
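	/* Skip fan attributes if fan is not present */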
3191 if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3192 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3193 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3194 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3195 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3196 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3197 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3198 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3199 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3200 return 0;
3201
3202
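	/* Skip fan attributes on APU */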
3203 if ((adev->flags & AMD_IS_APU) &&
3204 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3205 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3206 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3207 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3208 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3209 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3210 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3211 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3212 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3213 return 0;
3214
3215
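	/* Skip crit temp on APU */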
3216 if ((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ) &&
3217 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3218 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
3219 return 0;
3220
3221
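	/* Skip limit attributes if DPM is not enabled */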
3222 if (!adev->pm.dpm_enabled &&
3223 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3224 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
3225 attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3226 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3227 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3228 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3229 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3230 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3231 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3232 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3233 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3234 return 0;
3235
3236 if (!is_support_sw_smu(adev)) {
3237
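		/* mask fan attributes if we have no bindings for this asic to expose */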
3238 if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
3239 attr == &sensor_dev_attr_pwm1.dev_attr.attr) ||
3240 (!adev->powerplay.pp_funcs->get_fan_control_mode &&
3241 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr))
3242 effective_mode &= ~S_IRUGO;
3243
3244 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
3245 attr == &sensor_dev_attr_pwm1.dev_attr.attr) ||
3246 (!adev->powerplay.pp_funcs->set_fan_control_mode &&
3247 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr))
3248 effective_mode &= ~S_IWUSR;
3249 }
3250
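	/* no power reporting on APU and SI/KV */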
3251 if (((adev->flags & AMD_IS_APU) ||
3252 adev->family == AMDGPU_FAMILY_SI ||
3253 adev->family == AMDGPU_FAMILY_KV) &&
3254 (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
3255 attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
3257 attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
3258 return 0;
3259
3260 if (!is_support_sw_smu(adev)) {
3261
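		/* hide max/min values if we can't both query and manage the fan */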
3262 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
3263 !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
3264 (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
3265 !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
3266 (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3267 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
3268 return 0;
3269
3270 if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
3271 !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
3272 (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3273 attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
3274 return 0;
3275 }
3276
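	/* no vddgfx reporting on SI/KV */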
3277 if ((adev->family == AMDGPU_FAMILY_SI ||
3278 adev->family == AMDGPU_FAMILY_KV) &&
3279 (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
3280 attr == &sensor_dev_attr_in0_label.dev_attr.attr))
3281 return 0;
3282
3283
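	/* only APUs have vddnb */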
3284 if (!(adev->flags & AMD_IS_APU) &&
3285 (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
3286 attr == &sensor_dev_attr_in1_label.dev_attr.attr))
3287 return 0;
3288
3289
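	/* no mclk on APUs */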
3290 if ((adev->flags & AMD_IS_APU) &&
3291 (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
3292 attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
3293 return 0;
3294
3295
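	/* only SOC15 dGPUs support hotspot and mem temperatures */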
3296 if (((adev->flags & AMD_IS_APU) ||
3297 adev->asic_type < CHIP_VEGA10) &&
3298 (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
3299 attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
3300 attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
3301 attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
3302 attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
3303 attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
3304 attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr ||
3305 attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
3306 attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
3307 attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
3308 attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
3309 return 0;
3310
3311 return effective_mode;
3312}
3313
3314static const struct attribute_group hwmon_attrgroup = {
3315 .attrs = hwmon_attributes,
3316 .is_visible = hwmon_attributes_visible,
3317};
3318
3319static const struct attribute_group *hwmon_groups[] = {
3320 &hwmon_attrgroup,
3321 NULL
3322};
3323
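/**
 * amdgpu_dpm_thermal_work_handler - worker for thermal events
 * @work: work struct embedded in struct amdgpu_device
 *
 * Check the current GPU temperature and switch to the thermal power
 * state when the GPU overheats, or back to the user-selected state
 * once the temperature has dropped again.
 */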
3324void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
3325{
3326 struct amdgpu_device *adev =
3327 container_of(work, struct amdgpu_device,
3328 pm.dpm.thermal.work);
3329
3330 enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
3331 int temp, size = sizeof(temp);
3332
3333 if (!adev->pm.dpm_enabled)
3334 return;
3335
3336 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
3337 (void *)&temp, &size)) {
3338 if (temp < adev->pm.dpm.thermal.min_temp)
3339
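			/* switch back the user state */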
3340 dpm_state = adev->pm.dpm.user_state;
3341 } else {
3342 if (adev->pm.dpm.thermal.high_to_low)
3343
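			/* switch back the user state */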
3344 dpm_state = adev->pm.dpm.user_state;
3345 }
3346 mutex_lock(&adev->pm.mutex);
3347 if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
3348 adev->pm.dpm.thermal_active = true;
3349 else
3350 adev->pm.dpm.thermal_active = false;
3351 adev->pm.dpm.state = dpm_state;
3352 mutex_unlock(&adev->pm.mutex);
3353
3354 amdgpu_pm_compute_clocks(adev);
3355}
3356
3357static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
3358 enum amd_pm_state_type dpm_state)
3359{
3360 int i;
3361 struct amdgpu_ps *ps;
3362 u32 ui_class;
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2);
3365
3366
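	/* check if the vblank period is too short to adjust the mclk */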
3367 if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
3368 if (amdgpu_dpm_vblank_too_short(adev))
3369 single_display = false;
3370 }
3371
3372
3373
3374
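	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */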
3375 if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
3376 dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
3377
3378 if (dpm_state == POWER_STATE_TYPE_BALANCED)
3379 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
3380
3381restart_search:
3382
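	/* Pick the best power state based on current conditions */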
3383 for (i = 0; i < adev->pm.dpm.num_ps; i++) {
3384 ps = &adev->pm.dpm.ps[i];
3385 ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
3386 switch (dpm_state) {
3387
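		/* user states */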
3388 case POWER_STATE_TYPE_BATTERY:
3389 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
3390 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
3391 if (single_display)
3392 return ps;
3393 } else
3394 return ps;
3395 }
3396 break;
3397 case POWER_STATE_TYPE_BALANCED:
3398 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
3399 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
3400 if (single_display)
3401 return ps;
3402 } else
3403 return ps;
3404 }
3405 break;
3406 case POWER_STATE_TYPE_PERFORMANCE:
3407 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
3408 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
3409 if (single_display)
3410 return ps;
3411 } else
3412 return ps;
3413 }
3414 break;
3415
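		/* internal states */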
3416 case POWER_STATE_TYPE_INTERNAL_UVD:
3417 if (adev->pm.dpm.uvd_ps)
3418 return adev->pm.dpm.uvd_ps;
3419 else
3420 break;
3421 case POWER_STATE_TYPE_INTERNAL_UVD_SD:
3422 if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
3423 return ps;
3424 break;
3425 case POWER_STATE_TYPE_INTERNAL_UVD_HD:
3426 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
3427 return ps;
3428 break;
3429 case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
3430 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
3431 return ps;
3432 break;
3433 case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
3434 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
3435 return ps;
3436 break;
3437 case POWER_STATE_TYPE_INTERNAL_BOOT:
3438 return adev->pm.dpm.boot_ps;
3439 case POWER_STATE_TYPE_INTERNAL_THERMAL:
3440 if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
3441 return ps;
3442 break;
3443 case POWER_STATE_TYPE_INTERNAL_ACPI:
3444 if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
3445 return ps;
3446 break;
3447 case POWER_STATE_TYPE_INTERNAL_ULV:
3448 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
3449 return ps;
3450 break;
3451 case POWER_STATE_TYPE_INTERNAL_3DPERF:
3452 if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
3453 return ps;
3454 break;
3455 default:
3456 break;
3457 }
3458 }
3459
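	/* use a fallback state if we didn't match */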
3460 switch (dpm_state) {
3461 case POWER_STATE_TYPE_INTERNAL_UVD_SD:
3462 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
3463 goto restart_search;
3464 case POWER_STATE_TYPE_INTERNAL_UVD_HD:
3465 case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
3466 case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
3467 if (adev->pm.dpm.uvd_ps) {
3468 return adev->pm.dpm.uvd_ps;
3469 } else {
3470 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
3471 goto restart_search;
3472 }
3473 case POWER_STATE_TYPE_INTERNAL_THERMAL:
3474 dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
3475 goto restart_search;
3476 case POWER_STATE_TYPE_INTERNAL_ACPI:
3477 dpm_state = POWER_STATE_TYPE_BATTERY;
3478 goto restart_search;
3479 case POWER_STATE_TYPE_BATTERY:
3480 case POWER_STATE_TYPE_BALANCED:
3481 case POWER_STATE_TYPE_INTERNAL_3DPERF:
3482 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
3483 goto restart_search;
3484 default:
3485 break;
3486 }
3487
3488 return NULL;
3489}
3490
3491static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
3492{
3493 struct amdgpu_ps *ps;
3494 enum amd_pm_state_type dpm_state;
3495 int ret;
3496 bool equal = false;
3497
3498
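	/* if dpm init failed */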
3499 if (!adev->pm.dpm_enabled)
3500 return;
3501
3502 if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
3503
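		/* add other state override checks here */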
3504 if ((!adev->pm.dpm.thermal_active) &&
3505 (!adev->pm.dpm.uvd_active))
3506 adev->pm.dpm.state = adev->pm.dpm.user_state;
3507 }
3508 dpm_state = adev->pm.dpm.state;
3509
3510 ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
3511 if (ps)
3512 adev->pm.dpm.requested_ps = ps;
3513 else
3514 return;
3515
3516 if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
		printk(KERN_INFO "switching from power state:\n");
3518 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk(KERN_INFO "switching to power state:\n");
3520 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
3521 }
3522
3523
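	/* update whether vce is active */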
3524 ps->vce_active = adev->pm.dpm.vce_active;
3525 if (adev->powerplay.pp_funcs->display_configuration_changed)
3526 amdgpu_dpm_display_configuration_changed(adev);
3527
3528 ret = amdgpu_dpm_pre_set_power_state(adev);
3529 if (ret)
3530 return;
3531
3532 if (adev->powerplay.pp_funcs->check_state_equal) {
		if (amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
3534 equal = false;
3535 }
3536
3537 if (equal)
3538 return;
3539
3540 amdgpu_dpm_set_power_state(adev);
3541 amdgpu_dpm_post_set_power_state(adev);
3542
3543 adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
3544 adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
3545
3546 if (adev->powerplay.pp_funcs->force_performance_level) {
3547 if (adev->pm.dpm.thermal_active) {
3548 enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
3549
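			/* force low perf level for thermal */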
3550 amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
3551
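			/* save the user's level */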
3552 adev->pm.dpm.forced_level = level;
3553 } else {
3554
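			/* otherwise, user selected level */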
3555 amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
3556 }
3557 }
3558}
3559
3560void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
3561{
3562 int ret = 0;
3563
3564 if (adev->family == AMDGPU_FAMILY_SI) {
3565 mutex_lock(&adev->pm.mutex);
3566 if (enable) {
3567 adev->pm.dpm.uvd_active = true;
3568 adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
3569 } else {
3570 adev->pm.dpm.uvd_active = false;
3571 }
3572 mutex_unlock(&adev->pm.mutex);
3573
3574 amdgpu_pm_compute_clocks(adev);
3575 } else {
3576 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
3577 if (ret)
			DRM_ERROR("DPM %s uvd failed, ret = %d.\n",
3579 enable ? "enable" : "disable", ret);
3580
3581
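		/* enable/disable Low Memory PState for UVD (4k videos) */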
3582 if (adev->asic_type == CHIP_STONEY &&
3583 adev->uvd.decode_image_width >= WIDTH_4K) {
3584 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
3585
3586 if (hwmgr && hwmgr->hwmgr_func &&
3587 hwmgr->hwmgr_func->update_nbdpm_pstate)
3588 hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
3589 !enable,
3590 true);
3591 }
3592 }
3593}
3594
3595void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
3596{
3597 int ret = 0;
3598
3599 if (adev->family == AMDGPU_FAMILY_SI) {
3600 mutex_lock(&adev->pm.mutex);
3601 if (enable) {
3602 adev->pm.dpm.vce_active = true;
3603
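			/* XXX select vce level based on ring/task */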
3604 adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
3605 } else {
3606 adev->pm.dpm.vce_active = false;
3607 }
3608 mutex_unlock(&adev->pm.mutex);
3609
3610 amdgpu_pm_compute_clocks(adev);
3611 } else {
3612 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
3613 if (ret)
			DRM_ERROR("DPM %s vce failed, ret = %d.\n",
3615 enable ? "enable" : "disable", ret);
3616 }
3617}
3618
3619void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
3620{
3621 int i;
3622
3623 if (adev->powerplay.pp_funcs->print_power_state == NULL)
3624 return;
3625
3626 for (i = 0; i < adev->pm.dpm.num_ps; i++)
3627 amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
3629}
3630
3631void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
3632{
3633 int ret = 0;
3634
3635 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
3636 if (ret)
		DRM_ERROR("DPM %s jpeg failed, ret = %d.\n",
3638 enable ? "enable" : "disable", ret);
3639}
3640
3641int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
3642{
3643 int r;
3644
3645 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
3646 r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
3647 if (r) {
3648 pr_err("smu firmware loading failed\n");
3649 return r;
3650 }
3651 *smu_version = adev->pm.fw_version;
3652 }
3653 return 0;
3654}
3655
3656int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
3657{
3658 int ret;
3659 uint32_t mask = 0;
3660
3661 if (adev->pm.sysfs_initialized)
3662 return 0;
3663
3664 if (adev->pm.dpm_enabled == 0)
3665 return 0;
3666
3667 INIT_LIST_HEAD(&adev->pm.pm_attr_list);
3668
3669 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
3670 DRIVER_NAME, adev,
3671 hwmon_groups);
3672 if (IS_ERR(adev->pm.int_hwmon_dev)) {
3673 ret = PTR_ERR(adev->pm.int_hwmon_dev);
3674 dev_err(adev->dev,
3675 "Unable to register hwmon device: %d\n", ret);
3676 return ret;
3677 }
3678
3679 switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
3680 case SRIOV_VF_MODE_ONE_VF:
3681 mask = ATTR_FLAG_ONEVF;
3682 break;
3683 case SRIOV_VF_MODE_MULTI_VF:
3684 mask = 0;
3685 break;
3686 case SRIOV_VF_MODE_BARE_METAL:
3687 default:
3688 mask = ATTR_FLAG_MASK_ALL;
3689 break;
3690 }
3691
3692 ret = amdgpu_device_attr_create_groups(adev,
3693 amdgpu_device_attrs,
3694 ARRAY_SIZE(amdgpu_device_attrs),
3695 mask,
3696 &adev->pm.pm_attr_list);
3697 if (ret)
3698 return ret;
3699
3700 adev->pm.sysfs_initialized = true;
3701
3702 return 0;
3703}
3704
3705void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
3706{
3707 if (adev->pm.dpm_enabled == 0)
3708 return;
3709
3710 if (adev->pm.int_hwmon_dev)
3711 hwmon_device_unregister(adev->pm.int_hwmon_dev);
3712
3713 amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
3714}
3715
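/*
 * Recompute clocks after a display or power state change: update the
 * display bandwidth, wait for the rings to idle, then hand the new
 * display configuration to the SMU or powerplay.
 */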
3716void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
3717{
3718 int i = 0;
3719
3720 if (!adev->pm.dpm_enabled)
3721 return;
3722
3723 if (adev->mode_info.num_crtc)
3724 amdgpu_display_bandwidth_update(adev);
3725
3726 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
3727 struct amdgpu_ring *ring = adev->rings[i];
3728 if (ring && ring->sched.ready)
3729 amdgpu_fence_wait_empty(ring);
3730 }
3731
3732 if (is_support_sw_smu(adev)) {
3733 struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
3734 smu_handle_task(&adev->smu,
3735 smu_dpm->dpm_level,
3736 AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
3737 true);
3738 } else {
3739 if (adev->powerplay.pp_funcs->dispatch_tasks) {
3740 if (!amdgpu_device_has_dc_support(adev)) {
3741 mutex_lock(&adev->pm.mutex);
3742 amdgpu_dpm_get_active_displays(adev);
3743 adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
3744 adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
3745 adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
3746
3747 if (adev->pm.pm_display_cfg.vrefresh > 120)
3748 adev->pm.pm_display_cfg.min_vblank_time = 0;
3749 if (adev->powerplay.pp_funcs->display_configuration_change)
3750 adev->powerplay.pp_funcs->display_configuration_change(
3751 adev->powerplay.pp_handle,
3752 &adev->pm.pm_display_cfg);
3753 mutex_unlock(&adev->pm.mutex);
3754 }
3755 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
3756 } else {
3757 mutex_lock(&adev->pm.mutex);
3758 amdgpu_dpm_get_active_displays(adev);
3759 amdgpu_dpm_change_power_state_locked(adev);
3760 mutex_unlock(&adev->pm.mutex);
3761 }
3762 }
3763}
3764
3765
3766
3767
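/*
 * Debugfs info
 */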
3768#if defined(CONFIG_DEBUG_FS)
3769
3770static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
3771{
3772 uint32_t value;
3773 uint64_t value64;
3774 uint32_t query = 0;
3775 int size;
3776
3777
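	/* GPU Clocks */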
3778 size = sizeof(value);
3779 seq_printf(m, "GFX Clocks and Power:\n");
3780 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
3781 seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
3782 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
3783 seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
3784 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
3785 seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
3786 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
3787 seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
3788 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
3789 seq_printf(m, "\t%u mV (VDDGFX)\n", value);
3790 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
3791 seq_printf(m, "\t%u mV (VDDNB)\n", value);
3792 size = sizeof(uint32_t);
3793 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
3794 seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
3795 size = sizeof(value);
3796 seq_printf(m, "\n");
3797
3798
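	/* GPU Temperature */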
3799 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
3800 seq_printf(m, "GPU Temperature: %u C\n", value/1000);
3801
3802
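	/* GPU Load */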
3803 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
3804 seq_printf(m, "GPU Load: %u %%\n", value);
3805
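	/* MEM Load */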
3806 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
3807 seq_printf(m, "MEM Load: %u %%\n", value);
3808
3809 seq_printf(m, "\n");
3810
3811
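	/* SMC feature mask */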
3812 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
3813 seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
3814
3815 if (adev->asic_type > CHIP_VEGA20) {
3816
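		/* VCN clocks */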
3817 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
3818 if (!value) {
3819 seq_printf(m, "VCN: Disabled\n");
3820 } else {
3821 seq_printf(m, "VCN: Enabled\n");
3822 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3823 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3824 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3825 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3826 }
3827 }
3828 seq_printf(m, "\n");
3829 } else {
3830
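		/* UVD clocks */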
3831 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
3832 if (!value) {
3833 seq_printf(m, "UVD: Disabled\n");
3834 } else {
3835 seq_printf(m, "UVD: Enabled\n");
3836 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3837 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3838 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3839 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3840 }
3841 }
3842 seq_printf(m, "\n");
3843
3844
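		/* VCE clocks */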
3845 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
3846 if (!value) {
3847 seq_printf(m, "VCE: Disabled\n");
3848 } else {
3849 seq_printf(m, "VCE: Enabled\n");
3850 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
3851 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
3852 }
3853 }
3854 }
3855
3856 return 0;
3857}
3858
3859static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
3860{
3861 int i;
3862
3863 for (i = 0; clocks[i].flag; i++)
3864 seq_printf(m, "\t%s: %s\n", clocks[i].name,
3865 (flags & clocks[i].flag) ? "On" : "Off");
3866}
3867
3868static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
3869{
3870 struct drm_info_node *node = (struct drm_info_node *) m->private;
3871 struct drm_device *dev = node->minor->dev;
3872 struct amdgpu_device *adev = dev->dev_private;
3873 u32 flags = 0;
3874 int r;
3875
3876 if (adev->in_gpu_reset)
3877 return -EPERM;
3878
3879 r = pm_runtime_get_sync(dev->dev);
3880 if (r < 0) {
3881 pm_runtime_put_autosuspend(dev->dev);
3882 return r;
3883 }
3884
3885 amdgpu_device_ip_get_clockgating_state(adev, &flags);
3886 seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
3887 amdgpu_parse_cg_state(m, flags);
3888 seq_printf(m, "\n");
3889
3890 if (!adev->pm.dpm_enabled) {
3891 seq_printf(m, "dpm not enabled\n");
3892 pm_runtime_mark_last_busy(dev->dev);
3893 pm_runtime_put_autosuspend(dev->dev);
3894 return 0;
3895 }
3896
3897 if (!is_support_sw_smu(adev) &&
3898 adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
3899 mutex_lock(&adev->pm.mutex);
3900 if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
3901 adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
3902 else
3903 seq_printf(m, "Debugfs support not implemented for this asic\n");
3904 mutex_unlock(&adev->pm.mutex);
3905 r = 0;
3906 } else {
3907 r = amdgpu_debugfs_pm_info_pp(m, adev);
3908 }
3909
3910 pm_runtime_mark_last_busy(dev->dev);
3911 pm_runtime_put_autosuspend(dev->dev);
3912
3913 return r;
3914}
3915
3916static const struct drm_info_list amdgpu_pm_info_list[] = {
3917 {"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
3918};
3919#endif
3920
3921int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
3922{
3923#if defined(CONFIG_DEBUG_FS)
3924 return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
3925#else
3926 return 0;
3927#endif
3928}
3929