#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/poll.h>
#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_dm_debugfs.h"
#include "amdgpu_ras.h"
#include "amdgpu_rap.h"
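
/**
 * amdgpu_debugfs_add_files - register an array of drm_info_list entries
 *
 * @adev: amdgpu device
 * @files: array of read-only info callbacks to register
 * @nfiles: number of entries in @files
 *
 * Records the array in adev->debugfs[] so duplicate registrations are
 * rejected, then creates the corresponding files under the DRM primary
 * debugfs root.
 */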
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
			     const struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < adev->debugfs_count; i++) {
		if (adev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = adev->debugfs_count + 1;
	if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	adev->debugfs[adev->debugfs_count].files = files;
	adev->debugfs[adev->debugfs_count].num_files = nfiles;
	adev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 adev_to_drm(adev)->primary->debugfs_root,
				 adev_to_drm(adev)->primary);
#endif
	return 0;
}

int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned long timeout = 600 * HZ;
	int ret;

	wake_up_interruptible(&adev->autodump.gpu_hang);

	ret = wait_for_completion_interruptible_timeout(&adev->autodump.dumping, timeout);
	if (ret == 0) {
		pr_err("autodump: timeout, move on to gpu recovery\n");
		return -ETIMEDOUT;
	}
#endif
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_autodump_open(struct inode *inode, struct file *file)
{
	struct amdgpu_device *adev = inode->i_private;
	int ret;

	file->private_data = adev;

	ret = down_read_killable(&adev->reset_sem);
	if (ret)
		return ret;

	if (adev->autodump.dumping.done) {
		reinit_completion(&adev->autodump.dumping);
		ret = 0;
	} else {
		ret = -EBUSY;
	}

	up_read(&adev->reset_sem);

	return ret;
}

static int amdgpu_debugfs_autodump_release(struct inode *inode, struct file *file)
{
	struct amdgpu_device *adev = file->private_data;

	complete_all(&adev->autodump.dumping);
	return 0;
}

static unsigned int amdgpu_debugfs_autodump_poll(struct file *file, struct poll_table_struct *poll_table)
{
	struct amdgpu_device *adev = file->private_data;

	poll_wait(file, &adev->autodump.gpu_hang, poll_table);

	if (amdgpu_in_reset(adev))
		return POLLIN | POLLRDNORM | POLLWRNORM;

	return 0;
}

static const struct file_operations autodump_debug_fops = {
	.owner = THIS_MODULE,
	.open = amdgpu_debugfs_autodump_open,
	.poll = amdgpu_debugfs_autodump_poll,
	.release = amdgpu_debugfs_autodump_release,
};

static void amdgpu_debugfs_autodump_init(struct amdgpu_device *adev)
{
	init_completion(&adev->autodump.dumping);
	complete_all(&adev->autodump.dumping);
	init_waitqueue_head(&adev->autodump.gpu_hang);

	debugfs_create_file("amdgpu_autodump", 0600,
		adev_to_drm(adev)->primary->debugfs_root,
		adev, &autodump_debug_fops);
}
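
/**
 * amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes
 *
 * @read: True if reading
 * @f: open file handle
 * @buf: User buffer to write/read to
 * @size: Number of bytes to write/read
 * @pos:  Offset to seek to
 *
 * The offset being sought carries extra state in its upper bits:
 *
 * Bit 62:      Indicates a GRBM bank switch is needed
 * Bit 61:      Indicates a SRBM bank switch is needed (implies bit 62 is
 *              zero)
 * Bits 24..33: The SE or ME selector if needed
 * Bits 34..43: The SH (or SA) or PIPE selector if needed
 * Bits 44..53: The INSTANCE (or CU/WGP) or QUEUE selector if needed
 * Bits 54..58: The VMID selector for SRBM accesses
 *
 * Bit 23:      Indicates that the PM power gating lock should be held.
 *              This is necessary to read registers that might be
 *              unreliable during a power gating transition.
 *
 * The lower bits are the BYTE offset of the register to access.  This
 * allows reading or writing multiple registers in a single call and
 * having the returned size reflect that.
 */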
static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
		char __user *buf, size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank, use_ring;
	unsigned instance_bank, sh_bank, se_bank, me, pipe, queue, vmid;

	pm_pg_lock = use_bank = use_ring = false;
	instance_bank = sh_bank = se_bank = me = pipe = queue = vmid = 0;

	if (size & 0x3 || *pos & 0x3 ||
			((*pos & (1ULL << 62)) && (*pos & (1ULL << 61))))
		return -EINVAL;

	/* are we reading registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
		sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
		instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;

		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = true;
	} else if (*pos & (1ULL << 61)) {
		me = (*pos & GENMASK_ULL(33, 24)) >> 24;
		pipe = (*pos & GENMASK_ULL(43, 34)) >> 34;
		queue = (*pos & GENMASK_ULL(53, 44)) >> 44;
		vmid = (*pos & GENMASK_ULL(58, 54)) >> 54;

		use_ring = true;
	} else {
		use_bank = use_ring = false;
	}

	*pos &= (1UL << 22) - 1;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return -EINVAL;
		}
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	} else if (use_ring) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		if (read) {
			value = RREG32(*pos >> 2);
			r = put_user(value, (uint32_t *)buf);
		} else {
			r = get_user(value, (uint32_t *)buf);
			if (!r)
				amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value);
		}
		if (r) {
			result = r;
			goto end;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	} else if (use_ring) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
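
/*
 * amdgpu_debugfs_regs_read - Callback for reading MMIO registers
 */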
static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos);
}
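
/*
 * amdgpu_debugfs_regs_write - Callback for writing MMIO registers
 */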
static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
}
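
/**
 * amdgpu_debugfs_regs_pcie_read - Read from a PCIE register
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos:  Offset to seek to
 *
 * The lower bits are the BYTE offset of the register to read.  This
 * allows reading multiple registers in a single call and having
 * the returned size reflect that.
 */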
static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		value = RREG32_PCIE(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
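
/**
 * amdgpu_debugfs_regs_pcie_write - Write to a PCIE register
 *
 * @f: open file handle
 * @buf: User buffer to write data from
 * @size: Number of bytes to write
 * @pos:  Offset to seek to
 *
 * The lower bits are the BYTE offset of the register to write.  This
 * allows writing multiple registers in a single call and having
 * the returned size reflect that.
 */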
static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
					      size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		WREG32_PCIE(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
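
/**
 * amdgpu_debugfs_regs_didt_read - Read from a DIDT register
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos:  Offset to seek to
 *
 * The lower bits are the BYTE offset of the register to read.  This
 * allows reading multiple registers in a single call and having
 * the returned size reflect that.
 */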
static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		value = RREG32_DIDT(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
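
/**
 * amdgpu_debugfs_regs_didt_write - Write to a DIDT register
 *
 * @f: open file handle
 * @buf: User buffer to write data from
 * @size: Number of bytes to write
 * @pos:  Offset to seek to
 *
 * The lower bits are the BYTE offset of the register to write.  This
 * allows writing multiple registers in a single call and having
 * the returned size reflect that.
 */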
static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
					      size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		WREG32_DIDT(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
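
/**
 * amdgpu_debugfs_regs_smc_read - Read from a SMC register
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos:  Offset to seek to
 *
 * The lower bits are the BYTE offset of the register to read.  This
 * allows reading multiple registers in a single call and having
 * the returned size reflect that.
 */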
static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
					    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		value = RREG32_SMC(*pos);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
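
/**
 * amdgpu_debugfs_regs_smc_write - Write to a SMC register
 *
 * @f: open file handle
 * @buf: User buffer to write data from
 * @size: Number of bytes to write
 * @pos:  Offset to seek to
 *
 * The lower bits are the BYTE offset of the register to write.  This
 * allows writing multiple registers in a single call and having
 * the returned size reflect that.
 */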
static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		WREG32_SMC(*pos, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
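
/**
 * amdgpu_debugfs_gca_config_read - Read from GCA configuration
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos:  Offset to seek to
 *
 * This file is used to access configuration data in a somewhat stable
 * fashion.  The format is a series of DWORDs with the first indicating
 * which revision it is.  New content is appended to the end so that
 * older software can still read the data.
 */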
static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
					      size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	uint32_t *config, no_regs = 0;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	/* version, increment each time something is added */
	config[no_regs++] = 3;
	config[no_regs++] = adev->gfx.config.max_shader_engines;
	config[no_regs++] = adev->gfx.config.max_tile_pipes;
	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
	config[no_regs++] = adev->gfx.config.max_sh_per_se;
	config[no_regs++] = adev->gfx.config.max_backends_per_se;
	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
	config[no_regs++] = adev->gfx.config.max_gprs;
	config[no_regs++] = adev->gfx.config.max_gs_threads;
	config[no_regs++] = adev->gfx.config.max_hw_contexts;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.num_tile_pipes;
	config[no_regs++] = adev->gfx.config.backend_enable_mask;
	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
	config[no_regs++] = adev->gfx.config.num_gpus;
	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
	config[no_regs++] = adev->gfx.config.gb_addr_config;
	config[no_regs++] = adev->gfx.config.num_rbs;

	/* rev==1 */
	config[no_regs++] = adev->rev_id;
	config[no_regs++] = adev->pg_flags;
	config[no_regs++] = adev->cg_flags;

	/* rev==2 */
	config[no_regs++] = adev->family;
	config[no_regs++] = adev->external_rev_id;

	/* rev==3 */
	config[no_regs++] = adev->pdev->device;
	config[no_regs++] = adev->pdev->revision;
	config[no_regs++] = adev->pdev->subsystem_device;
	config[no_regs++] = adev->pdev->subsystem_vendor;

	while (size && (*pos < no_regs * 4)) {
		uint32_t value;

		value = config[*pos >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			kfree(config);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	kfree(config);
	return result;
}
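
/**
 * amdgpu_debugfs_sensor_read - Read from the powerplay sensors
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos:  Offset to seek to
 *
 * The offset is treated as the BYTE address of one of the sensors
 * enumerated by amd_pp_sensors.  If a sensor is supported, 4 bytes of
 * data will be returned per sensor.
 */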
static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
					  size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int idx, x, outsize, r, valuesize;
	uint32_t values[16];

	if (size & 3 || *pos & 0x3)
		return -EINVAL;

	if (!adev->pm.dpm_enabled)
		return -EINVAL;

	/* convert offset to sensor number */
	idx = *pos >> 2;

	valuesize = sizeof(values);

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (r) {
		amdgpu_virt_disable_access_debugfs(adev);
		return r;
	}

	if (size > valuesize) {
		amdgpu_virt_disable_access_debugfs(adev);
		return -EINVAL;
	}

	outsize = 0;
	x = 0;
	if (!r) {
		while (size) {
			r = put_user(values[x++], (int32_t *)buf);
			buf += 4;
			size -= 4;
			outsize += 4;
		}
	}

	amdgpu_virt_disable_access_debugfs(adev);
	return !r ? outsize : r;
}
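
/**
 * amdgpu_debugfs_wave_read - Read WAVE STATUS data
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos:  Offset to seek to
 *
 * The offset being sought selects which wave the status data is
 * returned for.  The bits are used as follows:
 *
 * Bits 0..6:   Byte offset into data
 * Bits 7..14:  SE selector
 * Bits 15..22: SH/SA selector
 * Bits 23..30: CU/{WGP+SIMD} selector
 * Bits 31..36: WAVE ID selector
 * Bits 37..44: SIMD ID selector
 *
 * The returned data is an array of DWORDs filled in by the GFX IP's
 * read_wave_data callback.
 */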
static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r, x;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, data[32];

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & GENMASK_ULL(6, 0));
	se = (*pos & GENMASK_ULL(14, 7)) >> 7;
	sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
	cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
	wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
	simd = (*pos & GENMASK_ULL(44, 37)) >> 37;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	x = 0;
	if (adev->gfx.funcs->read_wave_data)
		adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (!x) {
		amdgpu_virt_disable_access_debugfs(adev);
		return -EINVAL;
	}

	while (size && (offset < x * 4)) {
		uint32_t value;

		value = data[offset >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		result += 4;
		buf += 4;
		offset += 4;
		size -= 4;
	}

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
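
/**
 * amdgpu_debugfs_gpr_read - Read wave VGPRs or SGPRs
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos:  Offset to seek to
 *
 * The offset being sought selects the wave and register bank.  The
 * bits are used as follows:
 *
 * Bits 0..11:  Byte offset into the register data
 * Bits 12..19: SE selector
 * Bits 20..27: SH/SA selector
 * Bits 28..35: CU/{WGP+SIMD} selector
 * Bits 36..43: WAVE ID selector
 * Bits 44..51: SIMD ID selector
 * Bits 52..59: Thread selector (VGPRs only)
 * Bits 60..61: Bank selector (0 = VGPRs, otherwise SGPRs)
 */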
static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
				       size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;

	if (size > 4096 || size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & GENMASK_ULL(11, 0)) >> 2;
	se = (*pos & GENMASK_ULL(19, 12)) >> 12;
	sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
	cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
	wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
	simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
	thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
	bank = (*pos & GENMASK_ULL(61, 60)) >> 60;

	data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0)
		goto err;

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0)
		goto err;

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	if (bank == 0) {
		if (adev->gfx.funcs->read_wave_vgprs)
			adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size >> 2, data);
	} else {
		if (adev->gfx.funcs->read_wave_sgprs)
			adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size >> 2, data);
	}

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	while (size) {
		uint32_t value;

		value = data[result >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			amdgpu_virt_disable_access_debugfs(adev);
			goto err;
		}

		result += 4;
		buf += 4;
		size -= 4;
	}

	kfree(data);
	amdgpu_virt_disable_access_debugfs(adev);
	return result;

err:
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
	kfree(data);
	return r;
}
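
/**
 * amdgpu_debugfs_gfxoff_write - Enable/disable GFXOFF
 *
 * @f: open file handle
 * @buf: User buffer to write data from
 * @size: Number of bytes to write
 * @pos:  Offset to seek to
 *
 * Write a 32-bit zero to disable or a 32-bit non-zero to enable
 */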
static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *buf,
					   size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			return r;
		}

		amdgpu_gfx_off_ctrl(adev, value ? true : false);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	return result;
}
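
/**
 * amdgpu_debugfs_gfxoff_read - Read GFXOFF status
 *
 * @f: open file handle
 * @buf: User buffer to store read data in
 * @size: Number of bytes to read
 * @pos:  Offset to seek to
 */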
static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
					  size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		r = amdgpu_get_gfx_off_status(adev, &value);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			return r;
		}

		r = put_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	return result;
}

static const struct file_operations amdgpu_debugfs_regs_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_read,
	.write = amdgpu_debugfs_regs_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_didt_read,
	.write = amdgpu_debugfs_regs_didt_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_pcie_read,
	.write = amdgpu_debugfs_regs_pcie_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_smc_read,
	.write = amdgpu_debugfs_regs_smc_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gca_config_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gca_config_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_sensors_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_sensor_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_wave_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_wave_read,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_gpr_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gpr_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gfxoff_read,
	.write = amdgpu_debugfs_gfxoff_write,
	.llseek = default_llseek
};

static const struct file_operations *debugfs_regs[] = {
	&amdgpu_debugfs_regs_fops,
	&amdgpu_debugfs_regs_didt_fops,
	&amdgpu_debugfs_regs_pcie_fops,
	&amdgpu_debugfs_regs_smc_fops,
	&amdgpu_debugfs_gca_config_fops,
	&amdgpu_debugfs_sensors_fops,
	&amdgpu_debugfs_wave_fops,
	&amdgpu_debugfs_gpr_fops,
	&amdgpu_debugfs_gfxoff_fops,
};

static const char *debugfs_regs_names[] = {
	"amdgpu_regs",
	"amdgpu_regs_didt",
	"amdgpu_regs_pcie",
	"amdgpu_regs_smc",
	"amdgpu_gca_config",
	"amdgpu_sensors",
	"amdgpu_wave",
	"amdgpu_gpr",
	"amdgpu_gfxoff",
};

int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		ent = debugfs_create_file(debugfs_regs_names[i],
					  S_IFREG | S_IRUGO, root,
					  adev, debugfs_regs[i]);
		if (!i && !IS_ERR_OR_NULL(ent))
			i_size_write(ent->d_inode, adev->rmmio_size);
		adev->debugfs_regs[i] = ent;
	}

	return 0;
}

static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r = 0, i;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/* Avoid accidentally unparking the sched thread during GPU reset */
	r = down_read_killable(&adev->reset_sem);
	if (r) {
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
		return r;
	}

	/* hold on the scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_park(ring->sched.thread);
	}

	seq_printf(m, "run ib test:\n");
	r = amdgpu_ib_ring_tests(adev);
	if (r)
		seq_printf(m, "ib ring tests failed (%d).\n", r);
	else
		seq_printf(m, "ib ring tests passed.\n");

	/* go on the scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_unpark(ring->sched.thread);
	}

	up_read(&adev->reset_sem);

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;
}

static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);

	seq_write(m, adev->bios, adev->bios_size);
	return 0;
}

static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev));

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;
}

static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	seq_printf(m, "(%d)\n", ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_TT));

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_list[] = {
	{"amdgpu_vbios", amdgpu_debugfs_get_vbios_dump},
	{"amdgpu_test_ib", &amdgpu_debugfs_test_ib},
	{"amdgpu_evict_vram", &amdgpu_debugfs_evict_vram},
	{"amdgpu_evict_gtt", &amdgpu_debugfs_evict_gtt},
};

static void amdgpu_ib_preempt_fences_swap(struct amdgpu_ring *ring,
					  struct dma_fence **fences)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint32_t sync_seq, last_seq;

	last_seq = atomic_read(&ring->fence_drv.last_seq);
	sync_seq = ring->fence_drv.sync_seq;

	last_seq &= drv->num_fences_mask;
	sync_seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		fences[last_seq] = fence;

	} while (last_seq != sync_seq);
}

static void amdgpu_ib_preempt_signal_fences(struct dma_fence **fences,
					    int length)
{
	int i;
	struct dma_fence *fence;

	for (i = 0; i < length; i++) {
		fence = fences[i];
		if (!fence)
			continue;
		dma_fence_signal(fence);
		dma_fence_put(fence);
	}
}

static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;
	struct dma_fence *fence;

	spin_lock(&sched->job_list_lock);
	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
		fence = sched->ops->run_job(s_job);
		dma_fence_put(fence);
	}
	spin_unlock(&sched->job_list_lock);
}

static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
{
	struct amdgpu_job *job;
	struct drm_sched_job *s_job, *tmp;
	uint32_t preempt_seq;
	struct dma_fence *fence, **ptr;
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct drm_gpu_scheduler *sched = &ring->sched;
	bool preempted = true;

	if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
		return;

	preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2));
	if (preempt_seq <= atomic_read(&drv->last_seq)) {
		preempted = false;
		goto no_preempt;
	}

	preempt_seq &= drv->num_fences_mask;
	ptr = &drv->fences[preempt_seq];
	fence = rcu_dereference_protected(*ptr, 1);

no_preempt:
	spin_lock(&sched->job_list_lock);
	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		if (dma_fence_is_signaled(&s_job->s_fence->finished)) {
			/* remove job from ring mirror list */
			list_del_init(&s_job->node);
			sched->ops->free_job(s_job);
			continue;
		}
		job = to_amdgpu_job(s_job);
		if (preempted && job->fence == fence)
			/* mark the job as preempted */
			job->preemption_status |= AMDGPU_IB_PREEMPTED;
	}
	spin_unlock(&sched->job_list_lock);
}

static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
{
	int r, resched, length;
	struct amdgpu_ring *ring;
	struct dma_fence **fences = NULL;
	struct amdgpu_device *adev = (struct amdgpu_device *)data;

	if (val >= AMDGPU_MAX_RINGS)
		return -EINVAL;

	ring = adev->rings[val];

	if (!ring || !ring->funcs->preempt_ib || !ring->sched.thread)
		return -EINVAL;

	/* the last preemption failed */
	if (ring->trail_seq != le32_to_cpu(*ring->trail_fence_cpu_addr))
		return -EBUSY;

	length = ring->fence_drv.num_fences_mask + 1;
	fences = kcalloc(length, sizeof(void *), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	/* Avoid accidentally unparking the sched thread during GPU reset */
	r = down_read_killable(&adev->reset_sem);
	if (r)
		goto pro_end;

	/* stop the scheduler */
	kthread_park(ring->sched.thread);

	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

	/* preempt the IB */
	r = amdgpu_ring_preempt_ib(ring);
	if (r) {
		DRM_WARN("failed to preempt ring %d\n", ring->idx);
		goto failure;
	}

	amdgpu_fence_process(ring);

	if (atomic_read(&ring->fence_drv.last_seq) !=
	    ring->fence_drv.sync_seq) {
		DRM_INFO("ring %d was preempted\n", ring->idx);

		amdgpu_ib_preempt_mark_partial_job(ring);

		/* swap out the old fences */
		amdgpu_ib_preempt_fences_swap(ring, fences);

		amdgpu_fence_driver_force_completion(ring);

		/* resubmit unfinished jobs */
		amdgpu_ib_preempt_job_recovery(&ring->sched);

		/* wait for jobs finished */
		amdgpu_fence_wait_empty(ring);

		/* signal the old fences */
		amdgpu_ib_preempt_signal_fences(fences, length);
	}

failure:
	/* restart the scheduler */
	kthread_unpark(ring->sched.thread);

	up_read(&adev->reset_sem);

	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);

pro_end:
	kfree(fences);

	return r;
}

static int amdgpu_debugfs_sclk_set(void *data, u64 val)
{
	int ret = 0;
	uint32_t max_freq, min_freq;
	struct amdgpu_device *adev = (struct amdgpu_device *)data;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, &min_freq, &max_freq);
		if (ret || val > max_freq || val < min_freq) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			return -EINVAL;
		}
		ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, (uint32_t)val, (uint32_t)val);
	} else {
		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return 0;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (ret)
		return -EINVAL;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_ib_preempt, NULL,
			amdgpu_debugfs_ib_preempt, "%llu\n");

DEFINE_SIMPLE_ATTRIBUTE(fops_sclk_set, NULL,
			amdgpu_debugfs_sclk_set, "%llu\n");

int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
	int r, i;

	adev->debugfs_preempt =
		debugfs_create_file("amdgpu_preempt_ib", 0600,
				    adev_to_drm(adev)->primary->debugfs_root, adev,
				    &fops_ib_preempt);
	if (!(adev->debugfs_preempt)) {
		DRM_ERROR("unable to create amdgpu_preempt_ib debugfs file\n");
		return -EIO;
	}

	adev->smu.debugfs_sclk =
		debugfs_create_file("amdgpu_force_sclk", 0200,
				    adev_to_drm(adev)->primary->debugfs_root, adev,
				    &fops_sclk_set);
	if (!(adev->smu.debugfs_sclk)) {
		DRM_ERROR("unable to create amdgpu_force_sclk debugfs file\n");
		return -EIO;
	}

	r = amdgpu_ttm_debugfs_init(adev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}

	r = amdgpu_debugfs_pm_init(adev);
	if (r) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
		return r;
	}

	if (amdgpu_debugfs_sa_init(adev)) {
		dev_err(adev->dev, "failed to register debugfs file for SA\n");
	}

	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	r = amdgpu_debugfs_gem_init(adev);
	if (r)
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_regs_init(adev);
	if (r)
		DRM_ERROR("registering register debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_firmware_init(adev);
	if (r)
		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);

#if defined(CONFIG_DRM_AMD_DC)
	if (amdgpu_device_has_dc_support(adev)) {
		if (dtn_debugfs_init(adev))
			DRM_ERROR("amdgpu: failed to initialize dtn debugfs support.\n");
	}
#endif

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring)
			continue;

		if (amdgpu_debugfs_ring_init(adev, ring)) {
			DRM_ERROR("Failed to register debugfs file for rings !\n");
		}
	}

	amdgpu_ras_debugfs_create_all(adev);

	amdgpu_debugfs_autodump_init(adev);

	amdgpu_rap_debugfs_init(adev);

	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
					ARRAY_SIZE(amdgpu_debugfs_list));
}

#else
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
	return 0;
}
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	return 0;
}
#endif