#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/poll.h>
#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_dm_debugfs.h"
#include "amdgpu_ras.h"
#include "amdgpu_rap.h"
#include "amdgpu_fw_attestation.h"
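
/**
 * amdgpu_debugfs_add_files - register an array of debugfs entries
 *
 * @adev: amdgpu device pointer
 * @files: array of drm_info_list entries to register
 * @nfiles: number of entries in @files
 *
 * Records the array in adev->debugfs[] (skipping arrays that are already
 * registered) and, when CONFIG_DEBUG_FS is enabled, creates the
 * corresponding files under the primary minor's debugfs root.  At most
 * AMDGPU_DEBUGFS_MAX_COMPONENTS arrays can be registered per device.
 */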
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
			     const struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < adev->debugfs_count; i++) {
		if (adev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = adev->debugfs_count + 1;
	if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase "
			  "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	adev->debugfs[adev->debugfs_count].files = files;
	adev->debugfs[adev->debugfs_count].num_files = nfiles;
	adev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 adev_to_drm(adev)->primary->debugfs_root,
				 adev_to_drm(adev)->primary);
#endif
	return 0;
}

int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned long timeout = 600 * HZ;
	int ret;

	wake_up_interruptible(&adev->autodump.gpu_hang);

	ret = wait_for_completion_interruptible_timeout(&adev->autodump.dumping, timeout);
	if (ret == 0) {
		pr_err("autodump: timeout, move on to gpu recovery\n");
		return -ETIMEDOUT;
	}
#endif
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_autodump_open(struct inode *inode, struct file *file)
{
	struct amdgpu_device *adev = inode->i_private;
	int ret;

	file->private_data = adev;

	ret = down_read_killable(&adev->reset_sem);
	if (ret)
		return ret;

	if (adev->autodump.dumping.done) {
		reinit_completion(&adev->autodump.dumping);
		ret = 0;
	} else {
		ret = -EBUSY;
	}

	up_read(&adev->reset_sem);

	return ret;
}

static int amdgpu_debugfs_autodump_release(struct inode *inode, struct file *file)
{
	struct amdgpu_device *adev = file->private_data;

	complete_all(&adev->autodump.dumping);
	return 0;
}

static unsigned int amdgpu_debugfs_autodump_poll(struct file *file, struct poll_table_struct *poll_table)
{
	struct amdgpu_device *adev = file->private_data;

	poll_wait(file, &adev->autodump.gpu_hang, poll_table);

	if (amdgpu_in_reset(adev))
		return POLLIN | POLLRDNORM | POLLWRNORM;

	return 0;
}

static const struct file_operations autodump_debug_fops = {
	.owner = THIS_MODULE,
	.open = amdgpu_debugfs_autodump_open,
	.poll = amdgpu_debugfs_autodump_poll,
	.release = amdgpu_debugfs_autodump_release,
};

static void amdgpu_debugfs_autodump_init(struct amdgpu_device *adev)
{
	init_completion(&adev->autodump.dumping);
	complete_all(&adev->autodump.dumping);
	init_waitqueue_head(&adev->autodump.gpu_hang);

	debugfs_create_file("amdgpu_autodump", 0600,
		adev_to_drm(adev)->primary->debugfs_root,
		adev, &autodump_debug_fops);
}
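
/*
 * Rough userspace usage sketch for amdgpu_autodump (the path below assumes
 * the usual debugfs mount point and DRM minor 0): a debugger keeps the file
 * open and polls it; the poll wakes up when a GPU hang is detected, the
 * tool collects whatever state it wants, and closing the file (which
 * completes the dump) lets amdgpu_debugfs_wait_dump() proceed with GPU
 * recovery.
 *
 *	int fd = open("/sys/kernel/debug/dri/0/amdgpu_autodump", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);	// returns when the GPU hangs
 *	// ... dump registers, waves, rings, etc. ...
 *	close(fd);		// signals that the dump is finished
 */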
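
/**
 * amdgpu_debugfs_process_reg_op - read/write MMIO registers
 *
 * @read: true for a read, false for a write
 * @f: open file handle
 * @buf: user buffer to read from or write to
 * @size: number of bytes to transfer (must be a multiple of 4)
 * @pos: register offset plus selector bits, as decoded below:
 *
 *   bits 0..21   byte offset of the register to access
 *   bit  23      take the PM power-gating lock around the access
 *   bit  62      apply GRBM bank selection:
 *                  bits 24..33 SE, bits 34..43 SH, bits 44..53 instance
 *                  (a field value of 0x3FF selects broadcast)
 *   bit  61      apply SRBM selection (mutually exclusive with bit 62):
 *                  bits 24..33 ME, bits 34..43 PIPE,
 *                  bits 44..53 QUEUE, bits 54..58 VMID
 *
 * As an illustrative (unofficial) example, reading the register at byte
 * offset 0x1234 on SE 1 with SH and instance broadcast would seek to:
 *
 *	(1ULL << 62) | (1ULL << 24) | (0x3FFULL << 34) |
 *	(0x3FFULL << 44) | 0x1234
 */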
static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
		char __user *buf, size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank, use_ring;
	unsigned instance_bank, sh_bank, se_bank, me, pipe, queue, vmid;

	pm_pg_lock = use_bank = use_ring = false;
	instance_bank = sh_bank = se_bank = me = pipe = queue = vmid = 0;

	if (size & 0x3 || *pos & 0x3 ||
			((*pos & (1ULL << 62)) && (*pos & (1ULL << 61))))
		return -EINVAL;

	/* are we reading registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
		sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
		instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;

		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = true;
	} else if (*pos & (1ULL << 61)) {

		me = (*pos & GENMASK_ULL(33, 24)) >> 24;
		pipe = (*pos & GENMASK_ULL(43, 34)) >> 34;
		queue = (*pos & GENMASK_ULL(53, 44)) >> 44;
		vmid = (*pos & GENMASK_ULL(58, 54)) >> 54;

		use_ring = true;
	} else {
		use_bank = use_ring = false;
	}

	*pos &= (1UL << 22) - 1;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return -EINVAL;
		}
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	} else if (use_ring) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		if (read) {
			value = RREG32(*pos >> 2);
			r = put_user(value, (uint32_t *)buf);
		} else {
			r = get_user(value, (uint32_t *)buf);
			if (!r)
				amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value);
		}
		if (r) {
			result = r;
			goto end;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	} else if (use_ring) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
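
/**
 * amdgpu_debugfs_regs_read - debugfs read callback for MMIO registers
 */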
static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos);
}
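
/**
 * amdgpu_debugfs_regs_write - debugfs write callback for MMIO registers
 */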
static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
}
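
/**
 * amdgpu_debugfs_regs_pcie_read - read from the PCIE register aperture
 *
 * @f: open file handle
 * @buf: user buffer to store the read data in
 * @size: number of bytes to read (must be a multiple of 4)
 * @pos: byte offset of the first register to read
 *
 * Values are read one dword at a time via RREG32_PCIE(), so a single
 * call can return several consecutive registers.
 */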
static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		value = RREG32_PCIE(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
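
/**
 * amdgpu_debugfs_regs_pcie_write - write to the PCIE register aperture
 *
 * @f: open file handle
 * @buf: user buffer containing the dwords to write
 * @size: number of bytes to write (must be a multiple of 4)
 * @pos: byte offset of the first register to write
 *
 * Values are written one dword at a time via WREG32_PCIE().
 */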
static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		WREG32_PCIE(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
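
/**
 * amdgpu_debugfs_regs_didt_read - read from the DIDT register aperture
 *
 * @f: open file handle
 * @buf: user buffer to store the read data in
 * @size: number of bytes to read (must be a multiple of 4)
 * @pos: byte offset of the first register to read
 */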
static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		value = RREG32_DIDT(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
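
/**
 * amdgpu_debugfs_regs_didt_write - write to the DIDT register aperture
 *
 * @f: open file handle
 * @buf: user buffer containing the dwords to write
 * @size: number of bytes to write (must be a multiple of 4)
 * @pos: byte offset of the first register to write
 */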
static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		WREG32_DIDT(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
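
/**
 * amdgpu_debugfs_regs_smc_read - read from the SMC register space
 *
 * @f: open file handle
 * @buf: user buffer to store the read data in
 * @size: number of bytes to read (must be a multiple of 4)
 * @pos: byte address, passed straight through to RREG32_SMC()
 */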
static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		value = RREG32_SMC(*pos);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
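
/**
 * amdgpu_debugfs_regs_smc_write - write to the SMC register space
 *
 * @f: open file handle
 * @buf: user buffer containing the dwords to write
 * @size: number of bytes to write (must be a multiple of 4)
 * @pos: byte address, passed straight through to WREG32_SMC()
 */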
static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		WREG32_SMC(*pos, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
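
/**
 * amdgpu_debugfs_gca_config_read - dump a versioned snapshot of GPU config
 *
 * @f: open file handle
 * @buf: user buffer to store the read data in
 * @size: number of bytes to read (must be a multiple of 4)
 * @pos: dword-aligned offset into the snapshot
 *
 * Returns an array of dwords: a format version followed by the GFX
 * configuration, revision/family IDs and PCI IDs filled in below.
 */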
static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	uint32_t *config, no_regs = 0;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	/* version, increment each time something is added */
	config[no_regs++] = 3;
	config[no_regs++] = adev->gfx.config.max_shader_engines;
	config[no_regs++] = adev->gfx.config.max_tile_pipes;
	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
	config[no_regs++] = adev->gfx.config.max_sh_per_se;
	config[no_regs++] = adev->gfx.config.max_backends_per_se;
	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
	config[no_regs++] = adev->gfx.config.max_gprs;
	config[no_regs++] = adev->gfx.config.max_gs_threads;
	config[no_regs++] = adev->gfx.config.max_hw_contexts;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.num_tile_pipes;
	config[no_regs++] = adev->gfx.config.backend_enable_mask;
	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
	config[no_regs++] = adev->gfx.config.num_gpus;
	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
	config[no_regs++] = adev->gfx.config.gb_addr_config;
	config[no_regs++] = adev->gfx.config.num_rbs;

	/* rev==1 */
	config[no_regs++] = adev->rev_id;
	config[no_regs++] = adev->pg_flags;
	config[no_regs++] = adev->cg_flags;

	/* rev==2 */
	config[no_regs++] = adev->family;
	config[no_regs++] = adev->external_rev_id;

	/* rev==3 */
	config[no_regs++] = adev->pdev->device;
	config[no_regs++] = adev->pdev->revision;
	config[no_regs++] = adev->pdev->subsystem_device;
	config[no_regs++] = adev->pdev->subsystem_vendor;

	while (size && (*pos < no_regs * 4)) {
		uint32_t value;

		value = config[*pos >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			kfree(config);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	kfree(config);
	return result;
}
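
/**
 * amdgpu_debugfs_sensor_read - read a power/clock/temperature sensor
 *
 * @f: open file handle
 * @buf: user buffer for the sensor values
 * @size: number of bytes to read (must be a multiple of 4)
 * @pos: sensor index multiplied by 4; forwarded to amdgpu_dpm_read_sensor()
 */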
static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int idx, x, outsize, r, valuesize;
	uint32_t values[16];

	if (size & 3 || *pos & 0x3)
		return -EINVAL;

	if (!adev->pm.dpm_enabled)
		return -EINVAL;

	/* convert offset to sensor number */
	idx = *pos >> 2;

	valuesize = sizeof(values);

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (r) {
		amdgpu_virt_disable_access_debugfs(adev);
		return r;
	}

	if (size > valuesize) {
		amdgpu_virt_disable_access_debugfs(adev);
		return -EINVAL;
	}

	outsize = 0;
	x = 0;
	if (!r) {
		while (size) {
			r = put_user(values[x++], (int32_t *)buf);
			buf += 4;
			size -= 4;
			outsize += 4;
		}
	}

	amdgpu_virt_disable_access_debugfs(adev);
	return !r ? outsize : r;
}
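
/**
 * amdgpu_debugfs_wave_read - dump wave status for a selected wavefront
 *
 * The offset encodes which wave to dump (as decoded below):
 *
 *   bits 0..6    byte offset into the returned dword array
 *   bits 7..14   SE selector
 *   bits 15..22  SH/SA selector
 *   bits 23..30  CU/WGP selector
 *   bits 31..36  WAVE selector
 *   bits 37..44  SIMD selector
 */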
static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r, x;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, data[32];

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & GENMASK_ULL(6, 0));
	se = (*pos & GENMASK_ULL(14, 7)) >> 7;
	sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
	cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
	wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
	simd = (*pos & GENMASK_ULL(44, 37)) >> 37;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	x = 0;
	if (adev->gfx.funcs->read_wave_data)
		adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (!x) {
		amdgpu_virt_disable_access_debugfs(adev);
		return -EINVAL;
	}

	while (size && (offset < x * 4)) {
		uint32_t value;

		value = data[offset >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		result += 4;
		buf += 4;
		offset += 4;
		size -= 4;
	}

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
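
/**
 * amdgpu_debugfs_gpr_read - dump VGPRs or SGPRs of a selected wavefront
 *
 * The offset encodes which registers to dump (as decoded below):
 *
 *   bits 0..11   byte offset into the register bank (dword granularity)
 *   bits 12..19  SE selector
 *   bits 20..27  SH/SA selector
 *   bits 28..35  CU/WGP selector
 *   bits 36..43  WAVE selector
 *   bits 44..51  SIMD selector
 *   bits 52..59  THREAD selector (used for VGPRs only)
 *   bits 60..61  register bank: 0 = VGPRs, otherwise SGPRs
 *
 * At most 4096 bytes can be read per call.
 */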
static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;

	if (size > 4096 || size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & GENMASK_ULL(11, 0)) >> 2;
	se = (*pos & GENMASK_ULL(19, 12)) >> 12;
	sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
	cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
	wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
	simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
	thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
	bank = (*pos & GENMASK_ULL(61, 60)) >> 60;

	data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0)
		goto err;

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0)
		goto err;

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	if (bank == 0) {
		if (adev->gfx.funcs->read_wave_vgprs)
			adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
	} else {
		if (adev->gfx.funcs->read_wave_sgprs)
			adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
	}

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	while (size) {
		uint32_t value;

		value = data[result >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			amdgpu_virt_disable_access_debugfs(adev);
			goto err;
		}

		result += 4;
		buf += 4;
		size -= 4;
	}

	kfree(data);
	amdgpu_virt_disable_access_debugfs(adev);
	return result;

err:
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
	kfree(data);
	return r;
}
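
/**
 * amdgpu_debugfs_gfxoff_write - enable or disable GFXOFF
 *
 * Each dword written is passed to amdgpu_gfx_off_ctrl(): a non-zero
 * value allows the GFX block to enter GFXOFF, zero disallows it.
 */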
static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			return r;
		}

		amdgpu_gfx_off_ctrl(adev, value ? true : false);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	return result;
}
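
/**
 * amdgpu_debugfs_gfxoff_read - query the current GFXOFF status
 *
 * Each dword read is filled in from amdgpu_get_gfx_off_status().
 */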
static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		/* drop the reference taken above even on failure */
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		r = amdgpu_get_gfx_off_status(adev, &value);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			return r;
		}

		r = put_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	return result;
}

static const struct file_operations amdgpu_debugfs_regs_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_read,
	.write = amdgpu_debugfs_regs_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_didt_read,
	.write = amdgpu_debugfs_regs_didt_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_pcie_read,
	.write = amdgpu_debugfs_regs_pcie_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_smc_read,
	.write = amdgpu_debugfs_regs_smc_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gca_config_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gca_config_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_sensors_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_sensor_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_wave_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_wave_read,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_gpr_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gpr_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gfxoff_read,
	.write = amdgpu_debugfs_gfxoff_write,
	.llseek = default_llseek
};

static const struct file_operations *debugfs_regs[] = {
	&amdgpu_debugfs_regs_fops,
	&amdgpu_debugfs_regs_didt_fops,
	&amdgpu_debugfs_regs_pcie_fops,
	&amdgpu_debugfs_regs_smc_fops,
	&amdgpu_debugfs_gca_config_fops,
	&amdgpu_debugfs_sensors_fops,
	&amdgpu_debugfs_wave_fops,
	&amdgpu_debugfs_gpr_fops,
	&amdgpu_debugfs_gfxoff_fops,
};

static const char *debugfs_regs_names[] = {
	"amdgpu_regs",
	"amdgpu_regs_didt",
	"amdgpu_regs_pcie",
	"amdgpu_regs_smc",
	"amdgpu_gca_config",
	"amdgpu_sensors",
	"amdgpu_wave",
	"amdgpu_gpr",
	"amdgpu_gfxoff",
};
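
/**
 * amdgpu_debugfs_regs_init - create the register-access debugfs files
 *
 * Creates one file per entry of debugfs_regs_names[] (amdgpu_regs,
 * amdgpu_regs_didt, ...) under the device's primary debugfs directory
 * and sets the reported size of amdgpu_regs to the MMIO aperture size.
 */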
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		ent = debugfs_create_file(debugfs_regs_names[i],
					  S_IFREG | S_IRUGO, root,
					  adev, debugfs_regs[i]);
		if (!i && !IS_ERR_OR_NULL(ent))
			i_size_write(ent->d_inode, adev->rmmio_size);
		adev->debugfs_regs[i] = ent;
	}

	return 0;
}

static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r = 0, i;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	/* Avoid accidentally unparking the sched thread during GPU reset */
	r = down_read_killable(&adev->reset_sem);
	if (r) {
		pm_runtime_put_autosuspend(dev->dev);
		return r;
	}

	/* hold on the scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_park(ring->sched.thread);
	}

	seq_printf(m, "run ib test:\n");
	r = amdgpu_ib_ring_tests(adev);
	if (r)
		seq_printf(m, "ib ring tests failed (%d).\n", r);
	else
		seq_printf(m, "ib ring tests passed.\n");

	/* go on the scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_unpark(ring->sched.thread);
	}

	up_read(&adev->reset_sem);

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;
}

static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);

	seq_write(m, adev->bios, adev->bios_size);
	return 0;
}

static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev));

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;
}

static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct ttm_resource_manager *man;
	int r;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
	r = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
	seq_printf(m, "(%d)\n", r);

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;
}

static int amdgpu_debugfs_vm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct amdgpu_fpriv *fpriv = file->driver_priv;
		struct amdgpu_vm *vm = &fpriv->vm;

		seq_printf(m, "pid:%d\tProcess:%s ----------\n",
				vm->task_info.pid, vm->task_info.process_name);
		r = amdgpu_bo_reserve(vm->root.base.bo, true);
		if (r)
			break;
		amdgpu_debugfs_vm_bo_info(vm, m);
		amdgpu_bo_unreserve(vm->root.base.bo);
	}

	mutex_unlock(&dev->filelist_mutex);

	return r;
}

static const struct drm_info_list amdgpu_debugfs_list[] = {
	{"amdgpu_vbios", amdgpu_debugfs_get_vbios_dump},
	{"amdgpu_test_ib", &amdgpu_debugfs_test_ib},
	{"amdgpu_evict_vram", &amdgpu_debugfs_evict_vram},
	{"amdgpu_evict_gtt", &amdgpu_debugfs_evict_gtt},
	{"amdgpu_vm_info", &amdgpu_debugfs_vm_info},
};

static void amdgpu_ib_preempt_fences_swap(struct amdgpu_ring *ring,
					  struct dma_fence **fences)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint32_t sync_seq, last_seq;

	last_seq = atomic_read(&ring->fence_drv.last_seq);
	sync_seq = ring->fence_drv.sync_seq;

	last_seq &= drv->num_fences_mask;
	sync_seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		fences[last_seq] = fence;

	} while (last_seq != sync_seq);
}

static void amdgpu_ib_preempt_signal_fences(struct dma_fence **fences,
					    int length)
{
	int i;
	struct dma_fence *fence;

	for (i = 0; i < length; i++) {
		fence = fences[i];
		if (!fence)
			continue;
		dma_fence_signal(fence);
		dma_fence_put(fence);
	}
}

static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;
	struct dma_fence *fence;

	spin_lock(&sched->job_list_lock);
	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
		fence = sched->ops->run_job(s_job);
		dma_fence_put(fence);
	}
	spin_unlock(&sched->job_list_lock);
}

static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
{
	struct amdgpu_job *job;
	struct drm_sched_job *s_job, *tmp;
	uint32_t preempt_seq;
	struct dma_fence *fence, **ptr;
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct drm_gpu_scheduler *sched = &ring->sched;
	bool preempted = true;

	if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
		return;

	preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2));
	if (preempt_seq <= atomic_read(&drv->last_seq)) {
		preempted = false;
		goto no_preempt;
	}

	preempt_seq &= drv->num_fences_mask;
	ptr = &drv->fences[preempt_seq];
	fence = rcu_dereference_protected(*ptr, 1);

no_preempt:
	spin_lock(&sched->job_list_lock);
	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		if (dma_fence_is_signaled(&s_job->s_fence->finished)) {
			/* remove job from ring_mirror_list */
			list_del_init(&s_job->node);
			sched->ops->free_job(s_job);
			continue;
		}
		job = to_amdgpu_job(s_job);
		if (preempted && job->fence == fence)
			/* mark the job as preempted */
			job->preemption_status |= AMDGPU_IB_PREEMPTED;
	}
	spin_unlock(&sched->job_list_lock);
}

static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
{
	int r, resched, length;
	struct amdgpu_ring *ring;
	struct dma_fence **fences = NULL;
	struct amdgpu_device *adev = (struct amdgpu_device *)data;

	if (val >= AMDGPU_MAX_RINGS)
		return -EINVAL;

	ring = adev->rings[val];

	if (!ring || !ring->funcs->preempt_ib || !ring->sched.thread)
		return -EINVAL;

	/* the last preemption failed */
	if (ring->trail_seq != le32_to_cpu(*ring->trail_fence_cpu_addr))
		return -EBUSY;

	length = ring->fence_drv.num_fences_mask + 1;
	fences = kcalloc(length, sizeof(void *), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	/* Avoid accidentally unparking the sched thread during GPU reset */
	r = down_read_killable(&adev->reset_sem);
	if (r)
		goto pro_end;

	/* stop the scheduler */
	kthread_park(ring->sched.thread);

	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

	/* preempt the IB */
	r = amdgpu_ring_preempt_ib(ring);
	if (r) {
		DRM_WARN("failed to preempt ring %d\n", ring->idx);
		goto failure;
	}

	amdgpu_fence_process(ring);

	if (atomic_read(&ring->fence_drv.last_seq) !=
	    ring->fence_drv.sync_seq) {
		DRM_INFO("ring %d was preempted\n", ring->idx);

		amdgpu_ib_preempt_mark_partial_job(ring);

		/* swap out the old fences */
		amdgpu_ib_preempt_fences_swap(ring, fences);

		amdgpu_fence_driver_force_completion(ring);

		/* resubmit unfinished jobs */
		amdgpu_ib_preempt_job_recovery(&ring->sched);

		/* wait for the resubmitted jobs to finish */
		amdgpu_fence_wait_empty(ring);

		/* signal the old fences */
		amdgpu_ib_preempt_signal_fences(fences, length);
	}

failure:
	/* restart the scheduler */
	kthread_unpark(ring->sched.thread);

	up_read(&adev->reset_sem);

	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);

pro_end:
	kfree(fences);

	return r;
}

static int amdgpu_debugfs_sclk_set(void *data, u64 val)
{
	int ret = 0;
	uint32_t max_freq, min_freq;
	struct amdgpu_device *adev = (struct amdgpu_device *)data;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, &min_freq, &max_freq);
		if (ret || val > max_freq || val < min_freq) {
			/* drop the runtime PM reference before bailing out */
			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
			return -EINVAL;
		}
		ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, (uint32_t)val, (uint32_t)val);
	} else {
		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return 0;
	}

	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	if (ret)
		return -EINVAL;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_ib_preempt, NULL,
			amdgpu_debugfs_ib_preempt, "%llu\n");

DEFINE_SIMPLE_ATTRIBUTE(fops_sclk_set, NULL,
			amdgpu_debugfs_sclk_set, "%llu\n");

int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
	int r, i;

	adev->debugfs_preempt =
		debugfs_create_file("amdgpu_preempt_ib", 0600,
				    adev_to_drm(adev)->primary->debugfs_root, adev,
				    &fops_ib_preempt);
	if (!(adev->debugfs_preempt)) {
		DRM_ERROR("unable to create amdgpu_preempt_ib debugfs file\n");
		return -EIO;
	}

	adev->smu.debugfs_sclk =
		debugfs_create_file("amdgpu_force_sclk", 0200,
				    adev_to_drm(adev)->primary->debugfs_root, adev,
				    &fops_sclk_set);
	if (!(adev->smu.debugfs_sclk)) {
		DRM_ERROR("unable to create amdgpu_force_sclk debugfs file\n");
		return -EIO;
	}

	r = amdgpu_ttm_debugfs_init(adev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}

	r = amdgpu_debugfs_pm_init(adev);
	if (r) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
		return r;
	}

	if (amdgpu_debugfs_sa_init(adev)) {
		dev_err(adev->dev, "failed to register debugfs file for SA\n");
	}

	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	r = amdgpu_debugfs_gem_init(adev);
	if (r)
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_regs_init(adev);
	if (r)
		DRM_ERROR("registering register debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_firmware_init(adev);
	if (r)
		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);

#if defined(CONFIG_DRM_AMD_DC)
	if (amdgpu_device_has_dc_support(adev)) {
		if (dtn_debugfs_init(adev))
			DRM_ERROR("amdgpu: failed to initialize dtn debugfs support.\n");
	}
#endif

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring)
			continue;

		if (amdgpu_debugfs_ring_init(adev, ring)) {
			DRM_ERROR("Failed to register debugfs file for rings !\n");
		}
	}

	amdgpu_ras_debugfs_create_all(adev);

	amdgpu_debugfs_autodump_init(adev);

	amdgpu_rap_debugfs_init(adev);

	amdgpu_fw_attestation_debugfs_init(adev);

	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
					ARRAY_SIZE(amdgpu_debugfs_list));
}

#else
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
	return 0;
}
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	return 0;
}
#endif