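/*
 * amdgpu debugfs support
 *
 * Exposes debugfs files under /sys/kernel/debug/dri/<minor>/ for
 * register access (MMIO, PCIE, DIDT, SMC), GFX config and sensor
 * dumps, wave and GPR inspection, IB tests, VRAM/GTT eviction,
 * IB preemption and GPU hang autodump.
 */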

#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/poll.h>
#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_dm_debugfs.h"
#include "amdgpu_ras.h"
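
/**
 * amdgpu_debugfs_add_files - register an array of debugfs entries
 *
 * @adev: device the entries belong to
 * @files: array of drm_info_list callbacks to register
 * @nfiles: number of entries in @files
 *
 * Records the array in adev->debugfs[] (ignoring arrays that were
 * already registered) and, when CONFIG_DEBUG_FS is enabled, creates
 * the corresponding files on the device's primary DRM minor.
 */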
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
			     const struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < adev->debugfs_count; i++) {
		if (adev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = adev->debugfs_count + 1;
	if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	adev->debugfs[adev->debugfs_count].files = files;
	adev->debugfs[adev->debugfs_count].num_files = nfiles;
	adev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 adev->ddev->primary->debugfs_root,
				 adev->ddev->primary);
#endif
	return 0;
}

int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	/* give the userspace dump tool up to 10 minutes before recovery */
	unsigned long timeout = 600 * HZ;
	int ret;

	wake_up_interruptible(&adev->autodump.gpu_hang);

	ret = wait_for_completion_interruptible_timeout(&adev->autodump.dumping, timeout);
	if (ret == 0) {
		pr_err("autodump: timeout, move on to gpu recovery\n");
		return -ETIMEDOUT;
	}
#endif
	return 0;
}
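
/*
 * amdgpu_autodump lets a userspace tool snapshot GPU state before a
 * hang is reset.  Intended flow, reconstructed from the handshake
 * below (paths illustrative):
 *
 *   1. open /sys/kernel/debug/dri/<minor>/amdgpu_autodump
 *   2. poll() the fd; it becomes readable when a GPU hang is detected
 *   3. dump whatever state is of interest
 *   4. close the fd to let the driver proceed with GPU recovery
 *
 * Only one process may hold the file open at a time; the kernel side
 * of the handshake is amdgpu_debugfs_wait_dump() above.
 */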
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_autodump_open(struct inode *inode, struct file *file)
{
	struct amdgpu_device *adev = inode->i_private;
	int ret;

	file->private_data = adev;

	mutex_lock(&adev->lock_reset);
	if (adev->autodump.dumping.done) {
		reinit_completion(&adev->autodump.dumping);
		ret = 0;
	} else {
		/* a dump is already in flight */
		ret = -EBUSY;
	}
	mutex_unlock(&adev->lock_reset);

	return ret;
}

static int amdgpu_debugfs_autodump_release(struct inode *inode, struct file *file)
{
	struct amdgpu_device *adev = file->private_data;

	complete_all(&adev->autodump.dumping);
	return 0;
}

static unsigned int amdgpu_debugfs_autodump_poll(struct file *file, struct poll_table_struct *poll_table)
{
	struct amdgpu_device *adev = file->private_data;

	poll_wait(file, &adev->autodump.gpu_hang, poll_table);

	if (adev->in_gpu_reset)
		return POLLIN | POLLRDNORM | POLLWRNORM;

	return 0;
}

static const struct file_operations autodump_debug_fops = {
	.owner = THIS_MODULE,
	.open = amdgpu_debugfs_autodump_open,
	.poll = amdgpu_debugfs_autodump_poll,
	.release = amdgpu_debugfs_autodump_release,
};

static void amdgpu_debugfs_autodump_init(struct amdgpu_device *adev)
{
	init_completion(&adev->autodump.dumping);
	/* mark the completion as done so the first open() succeeds */
	complete_all(&adev->autodump.dumping);
	init_waitqueue_head(&adev->autodump.gpu_hang);

	debugfs_create_file("amdgpu_autodump", 0600,
			    adev->ddev->primary->debugfs_root,
			    adev, &autodump_debug_fops);
}
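
/**
 * amdgpu_debugfs_process_reg_op - handle MMIO register reads and writes
 *
 * @read: true for a register read, false for a write
 * @f: open file handle
 * @buf: user buffer to copy the values to/from
 * @size: number of bytes to transfer (must be a multiple of 4)
 * @pos: file offset; encodes the register and access flags
 *
 * The offset being sought carries more than the register address.
 * As decoded below:
 *
 *   Bit 62:      a GRBM bank switch is needed; bits 24..33, 34..43
 *                and 44..53 then hold the SE, SH and INSTANCE
 *                selectors, where 0x3FF selects all banks
 *   Bit 61:      an SRBM switch is needed (mutually exclusive with
 *                bit 62); bits 24..33, 34..43, 44..53 and 54..58
 *                hold the ME, PIPE, QUEUE and VMID selectors
 *   Bit 23:      hold the power-gating lock around the access
 *   Bits 0..21:  byte offset of the register
 *
 * For example (illustrative; the minor number depends on the system),
 * a tool can read the 32-bit register at byte offset 0x100 with:
 *
 *   dd if=/sys/kernel/debug/dri/0/amdgpu_regs bs=4 skip=$((0x100 / 4)) count=1
 */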
static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
		char __user *buf, size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	bool pm_pg_lock, use_bank, use_ring;
	unsigned instance_bank, sh_bank, se_bank, me, pipe, queue, vmid;

	pm_pg_lock = use_bank = use_ring = false;
	instance_bank = sh_bank = se_bank = me = pipe = queue = vmid = 0;

	/* GRBM and SRBM banking are mutually exclusive */
	if (size & 0x3 || *pos & 0x3 ||
			((*pos & (1ULL << 62)) && (*pos & (1ULL << 61))))
		return -EINVAL;

	/* are we reading registers for which a PG lock is necessary? */
	pm_pg_lock = (*pos >> 23) & 1;

	if (*pos & (1ULL << 62)) {
		se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
		sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
		instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;

		if (se_bank == 0x3FF)
			se_bank = 0xFFFFFFFF;
		if (sh_bank == 0x3FF)
			sh_bank = 0xFFFFFFFF;
		if (instance_bank == 0x3FF)
			instance_bank = 0xFFFFFFFF;
		use_bank = true;
	} else if (*pos & (1ULL << 61)) {
		me = (*pos & GENMASK_ULL(33, 24)) >> 24;
		pipe = (*pos & GENMASK_ULL(43, 34)) >> 34;
		queue = (*pos & GENMASK_ULL(53, 44)) >> 44;
		vmid = (*pos & GENMASK_ULL(58, 54)) >> 54;

		use_ring = true;
	} else {
		use_bank = use_ring = false;
	}

	/* keep only the register byte offset */
	*pos &= (1UL << 22) - 1;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	if (use_bank) {
		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
			pm_runtime_mark_last_busy(adev->ddev->dev);
			pm_runtime_put_autosuspend(adev->ddev->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return -EINVAL;
		}
		mutex_lock(&adev->grbm_idx_mutex);
		amdgpu_gfx_select_se_sh(adev, se_bank,
					sh_bank, instance_bank);
	} else if (use_ring) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid);
	}

	if (pm_pg_lock)
		mutex_lock(&adev->pm.mutex);

	while (size) {
		uint32_t value;

		if (read) {
			value = RREG32(*pos >> 2);
			r = put_user(value, (uint32_t *)buf);
		} else {
			r = get_user(value, (uint32_t *)buf);
			if (!r)
				amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value, 0);
		}
		if (r) {
			result = r;
			goto end;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

end:
	if (use_bank) {
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
	} else if (use_ring) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	if (pm_pg_lock)
		mutex_unlock(&adev->pm.mutex);

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}

static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos);
}

static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
					 size_t size, loff_t *pos)
{
	return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
}
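
/**
 * amdgpu_debugfs_regs_pcie_read - read from a PCIE register
 *
 * @f: open file handle
 * @buf: user buffer to store the read data in
 * @size: number of bytes to read (must be a multiple of 4)
 * @pos: byte offset of the first register to read
 *
 * Consecutive registers can be read in a single call; the returned
 * size reflects how much was transferred.
 */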
static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		value = RREG32_PCIE(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev->ddev->dev);
			pm_runtime_put_autosuspend(adev->ddev->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
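
/**
 * amdgpu_debugfs_regs_pcie_write - write to a PCIE register
 *
 * @f: open file handle
 * @buf: user buffer holding the data to write
 * @size: number of bytes to write (must be a multiple of 4)
 * @pos: byte offset of the first register to write
 *
 * Consecutive registers can be written in a single call.
 */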
static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev->ddev->dev);
			pm_runtime_put_autosuspend(adev->ddev->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		WREG32_PCIE(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
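
/**
 * amdgpu_debugfs_regs_didt_read - read from a DIDT register
 *
 * @f: open file handle
 * @buf: user buffer to store the read data in
 * @size: number of bytes to read (must be a multiple of 4)
 * @pos: byte offset of the first register to read
 */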
static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		value = RREG32_DIDT(*pos >> 2);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev->ddev->dev);
			pm_runtime_put_autosuspend(adev->ddev->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
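
/**
 * amdgpu_debugfs_regs_didt_write - write to a DIDT register
 *
 * @f: open file handle
 * @buf: user buffer holding the data to write
 * @size: number of bytes to write (must be a multiple of 4)
 * @pos: byte offset of the first register to write
 */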
static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev->ddev->dev);
			pm_runtime_put_autosuspend(adev->ddev->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		WREG32_DIDT(*pos >> 2, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
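
/**
 * amdgpu_debugfs_regs_smc_read - read from an SMC register
 *
 * @f: open file handle
 * @buf: user buffer to store the read data in
 * @size: number of bytes to read (must be a multiple of 4)
 * @pos: byte offset of the first register to read (passed to the SMC
 *       address space unshifted)
 */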
static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		value = RREG32_SMC(*pos);
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev->ddev->dev);
			pm_runtime_put_autosuspend(adev->ddev->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
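
/**
 * amdgpu_debugfs_regs_smc_write - write to an SMC register
 *
 * @f: open file handle
 * @buf: user buffer holding the data to write
 * @size: number of bytes to write (must be a multiple of 4)
 * @pos: byte offset of the first register to write (passed to the SMC
 *       address space unshifted)
 */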
static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev->ddev->dev);
			pm_runtime_put_autosuspend(adev->ddev->dev);
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		WREG32_SMC(*pos, value);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
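
/**
 * amdgpu_debugfs_gca_config_read - dump the GFX configuration
 *
 * @f: open file handle
 * @buf: user buffer to store the read data in
 * @size: number of bytes to read (must be a multiple of 4)
 * @pos: byte offset into the dump
 *
 * Returns a versioned snapshot of the gfx configuration (shader
 * engine counts, fifo sizes, tiling setup, PCI IDs, ...) as an
 * array of DWORDs; the first DWORD is the format version.
 */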
static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;
	uint32_t *config, no_regs = 0;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	/* version, increment each time something is added */
	config[no_regs++] = 3;
	config[no_regs++] = adev->gfx.config.max_shader_engines;
	config[no_regs++] = adev->gfx.config.max_tile_pipes;
	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
	config[no_regs++] = adev->gfx.config.max_sh_per_se;
	config[no_regs++] = adev->gfx.config.max_backends_per_se;
	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
	config[no_regs++] = adev->gfx.config.max_gprs;
	config[no_regs++] = adev->gfx.config.max_gs_threads;
	config[no_regs++] = adev->gfx.config.max_hw_contexts;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
	config[no_regs++] = adev->gfx.config.num_tile_pipes;
	config[no_regs++] = adev->gfx.config.backend_enable_mask;
	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
	config[no_regs++] = adev->gfx.config.num_gpus;
	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
	config[no_regs++] = adev->gfx.config.gb_addr_config;
	config[no_regs++] = adev->gfx.config.num_rbs;

	/* rev==1 */
	config[no_regs++] = adev->rev_id;
	config[no_regs++] = adev->pg_flags;
	config[no_regs++] = adev->cg_flags;

	/* rev==2 */
	config[no_regs++] = adev->family;
	config[no_regs++] = adev->external_rev_id;

	/* rev==3 */
	config[no_regs++] = adev->pdev->device;
	config[no_regs++] = adev->pdev->revision;
	config[no_regs++] = adev->pdev->subsystem_device;
	config[no_regs++] = adev->pdev->subsystem_vendor;

	while (size && (*pos < no_regs * 4)) {
		uint32_t value;

		value = config[*pos >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			kfree(config);
			return r;
		}

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	kfree(config);
	return result;
}
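
/**
 * amdgpu_debugfs_sensor_read - read a power/clock/temperature sensor
 *
 * @f: open file handle
 * @buf: user buffer to store the read data in
 * @size: number of bytes to read
 * @pos: offset; *pos >> 2 selects the AMDGPU_PP_SENSOR_* id to query
 */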
static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	int idx, x, outsize, r, valuesize;
	uint32_t values[16];

	if (size & 3 || *pos & 0x3)
		return -EINVAL;

	if (!adev->pm.dpm_enabled)
		return -EINVAL;

	/* convert offset to sensor number */
	idx = *pos >> 2;

	valuesize = sizeof(values);

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (r) {
		amdgpu_virt_disable_access_debugfs(adev);
		return r;
	}

	if (size > valuesize) {
		amdgpu_virt_disable_access_debugfs(adev);
		return -EINVAL;
	}

	outsize = 0;
	x = 0;
	if (!r) {
		while (size) {
			r = put_user(values[x++], (int32_t *)buf);
			buf += 4;
			size -= 4;
			outsize += 4;
		}
	}

	amdgpu_virt_disable_access_debugfs(adev);
	return !r ? outsize : r;
}
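
/**
 * amdgpu_debugfs_wave_read - dump the status of a single wave
 *
 * @f: open file handle
 * @buf: user buffer to store the read data in
 * @size: number of bytes to read
 * @pos: offset; selects the wave as decoded below:
 *
 *   Bits 0..6:   byte offset into the dumped data
 *   Bits 7..14:  shader engine (SE)
 *   Bits 15..22: SH/SA
 *   Bits 23..30: CU/WGP
 *   Bits 31..36: wave id
 *   Bits 37..44: simd id
 */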
static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r, x;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, data[32];

	if (size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & GENMASK_ULL(6, 0));
	se = (*pos & GENMASK_ULL(14, 7)) >> 7;
	sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
	cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
	wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
	simd = (*pos & GENMASK_ULL(44, 37)) >> 37;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	x = 0;
	if (adev->gfx.funcs->read_wave_data)
		adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (!x) {
		amdgpu_virt_disable_access_debugfs(adev);
		return -EINVAL;
	}

	while (size && (offset < x * 4)) {
		uint32_t value;

		value = data[offset >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			amdgpu_virt_disable_access_debugfs(adev);
			return r;
		}

		result += 4;
		buf += 4;
		offset += 4;
		size -= 4;
	}

	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
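
/**
 * amdgpu_debugfs_gpr_read - dump VGPRs or SGPRs for a single wave
 *
 * @f: open file handle
 * @buf: user buffer to store the read data in
 * @size: number of bytes to read, at most 4096
 * @pos: offset; selects the registers as decoded below:
 *
 *   Bits 0..11:  dword offset into the register bank
 *   Bits 12..19: shader engine (SE)
 *   Bits 20..27: SH/SA
 *   Bits 28..35: CU/WGP
 *   Bits 36..43: wave id
 *   Bits 44..51: simd id
 *   Bits 52..59: thread id (VGPRs only)
 *   Bits 60..61: bank; 0 selects VGPRs, anything else SGPRs
 */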
static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	int r;
	ssize_t result = 0;
	uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;

	if (size > 4096 || size & 3 || *pos & 3)
		return -EINVAL;

	/* decode offset */
	offset = (*pos & GENMASK_ULL(11, 0)) >> 2;
	se = (*pos & GENMASK_ULL(19, 12)) >> 12;
	sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
	cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
	wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
	simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
	thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
	bank = (*pos & GENMASK_ULL(61, 60)) >> 60;

	data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		kfree(data);
		return r;
	}

	r = amdgpu_virt_enable_access_debugfs(adev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		kfree(data);
		return r;
	}

	/* switch to the specific se/sh/cu */
	mutex_lock(&adev->grbm_idx_mutex);
	amdgpu_gfx_select_se_sh(adev, se, sh, cu);

	if (bank == 0) {
		if (adev->gfx.funcs->read_wave_vgprs)
			adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size >> 2, data);
	} else {
		if (adev->gfx.funcs->read_wave_sgprs)
			adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size >> 2, data);
	}

	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
	mutex_unlock(&adev->grbm_idx_mutex);

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	while (size) {
		uint32_t value;

		value = data[result >> 2];
		r = put_user(value, (uint32_t *)buf);
		if (r) {
			result = r;
			goto err;
		}

		result += 4;
		buf += 4;
		size -= 4;
	}

err:
	kfree(data);
	amdgpu_virt_disable_access_debugfs(adev);
	return result;
}
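
/**
 * amdgpu_debugfs_gfxoff_write - enable/disable GFXOFF
 *
 * @f: open file handle
 * @buf: user buffer with one 32-bit value per request
 * @size: number of bytes to write (must be a multiple of 4)
 * @pos: offset (only checked for alignment)
 *
 * Write a 32-bit zero to disallow GFXOFF or a non-zero value to
 * allow it again.
 */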
static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *buf,
					   size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	while (size) {
		uint32_t value;

		r = get_user(value, (uint32_t *)buf);
		if (r) {
			pm_runtime_mark_last_busy(adev->ddev->dev);
			pm_runtime_put_autosuspend(adev->ddev->dev);
			return r;
		}

		amdgpu_gfx_off_ctrl(adev, value ? true : false);

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	return result;
}

static const struct file_operations amdgpu_debugfs_regs_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_read,
	.write = amdgpu_debugfs_regs_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_didt_read,
	.write = amdgpu_debugfs_regs_didt_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_pcie_read,
	.write = amdgpu_debugfs_regs_pcie_write,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_regs_smc_read,
	.write = amdgpu_debugfs_regs_smc_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gca_config_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gca_config_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_sensors_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_sensor_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_wave_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_wave_read,
	.llseek = default_llseek
};
static const struct file_operations amdgpu_debugfs_gpr_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gpr_read,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
	.owner = THIS_MODULE,
	.write = amdgpu_debugfs_gfxoff_write,
};

/* entry i of debugfs_regs[] pairs with entry i of debugfs_regs_names[] */
static const struct file_operations *debugfs_regs[] = {
	&amdgpu_debugfs_regs_fops,
	&amdgpu_debugfs_regs_didt_fops,
	&amdgpu_debugfs_regs_pcie_fops,
	&amdgpu_debugfs_regs_smc_fops,
	&amdgpu_debugfs_gca_config_fops,
	&amdgpu_debugfs_sensors_fops,
	&amdgpu_debugfs_wave_fops,
	&amdgpu_debugfs_gpr_fops,
	&amdgpu_debugfs_gfxoff_fops,
};

static const char *debugfs_regs_names[] = {
	"amdgpu_regs",
	"amdgpu_regs_didt",
	"amdgpu_regs_pcie",
	"amdgpu_regs_smc",
	"amdgpu_gca_config",
	"amdgpu_sensors",
	"amdgpu_wave",
	"amdgpu_gpr",
	"amdgpu_gfxoff",
};
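
/**
 * amdgpu_debugfs_regs_init - create the debugfs files for register access
 *
 * @adev: the device to attach the entries to
 *
 * Creates one file per entry of debugfs_regs_names[]/debugfs_regs[] on
 * the primary DRM minor and sizes the plain amdgpu_regs file to the
 * MMIO aperture so tools can detect the valid range.
 */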
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
		ent = debugfs_create_file(debugfs_regs_names[i],
					  S_IFREG | S_IRUGO, root,
					  adev, debugfs_regs[i]);
		/* the first entry is amdgpu_regs; report the MMIO size */
		if (!i && !IS_ERR_OR_NULL(ent))
			i_size_write(ent->d_inode, adev->rmmio_size);
		adev->debugfs_regs[i] = ent;
	}

	return 0;
}

static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int r = 0, i;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(dev->dev);
		return r;
	}

	/* Avoid accidentally unparking the sched thread during GPU reset */
	mutex_lock(&adev->lock_reset);

	/* hold on the scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_park(ring->sched.thread);
	}

	seq_printf(m, "run ib test:\n");
	r = amdgpu_ib_ring_tests(adev);
	if (r)
		seq_printf(m, "ib ring tests failed (%d).\n", r);
	else
		seq_printf(m, "ib ring tests passed.\n");

	/* go on the scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;
		kthread_unpark(ring->sched.thread);
	}

	mutex_unlock(&adev->lock_reset);

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;
}

static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_write(m, adev->bios, adev->bios_size);
	return 0;
}

static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int r;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(dev->dev);
		return r;
	}

	seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev));

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;
}

static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int r;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(dev->dev);
		return r;
	}

	seq_printf(m, "(%d)\n", ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_TT));

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return 0;
}

static const struct drm_info_list amdgpu_debugfs_list[] = {
	{"amdgpu_vbios", amdgpu_debugfs_get_vbios_dump},
	{"amdgpu_test_ib", amdgpu_debugfs_test_ib},
	{"amdgpu_evict_vram", amdgpu_debugfs_evict_vram},
	{"amdgpu_evict_gtt", amdgpu_debugfs_evict_gtt},
};

/*
 * Detach every unsignaled fence from the ring so they can be signaled
 * manually after the preempted jobs have been resubmitted.
 */
static void amdgpu_ib_preempt_fences_swap(struct amdgpu_ring *ring,
					  struct dma_fence **fences)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint32_t sync_seq, last_seq;

	last_seq = atomic_read(&ring->fence_drv.last_seq);
	sync_seq = ring->fence_drv.sync_seq;

	last_seq &= drv->num_fences_mask;
	sync_seq &= drv->num_fences_mask;

	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* get the fence and clear the slot to avoid a double free */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		fences[last_seq] = fence;
	} while (last_seq != sync_seq);
}

static void amdgpu_ib_preempt_signal_fences(struct dma_fence **fences,
					    int length)
{
	int i;
	struct dma_fence *fence;

	for (i = 0; i < length; i++) {
		fence = fences[i];
		if (!fence)
			continue;
		dma_fence_signal(fence);
		dma_fence_put(fence);
	}
}

/* resubmit every job still on the scheduler's mirror list */
static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job;
	struct dma_fence *fence;

	spin_lock(&sched->job_list_lock);
	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
		fence = sched->ops->run_job(s_job);
		dma_fence_put(fence);
	}
	spin_unlock(&sched->job_list_lock);
}

static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
{
	struct amdgpu_job *job;
	struct drm_sched_job *s_job, *tmp;
	uint32_t preempt_seq;
	struct dma_fence *fence, **ptr;
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	struct drm_gpu_scheduler *sched = &ring->sched;
	bool preempted = true;

	if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
		return;

	/* sequence number of the job that was preempted mid-execution */
	preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2));
	if (preempt_seq <= atomic_read(&drv->last_seq)) {
		preempted = false;
		goto no_preempt;
	}

	preempt_seq &= drv->num_fences_mask;
	ptr = &drv->fences[preempt_seq];
	fence = rcu_dereference_protected(*ptr, 1);

no_preempt:
	spin_lock(&sched->job_list_lock);
	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		if (dma_fence_is_signaled(&s_job->s_fence->finished)) {
			/* remove job from ring_mirror_list */
			list_del_init(&s_job->node);
			sched->ops->free_job(s_job);
			continue;
		}
		job = to_amdgpu_job(s_job);
		if (preempted && job->fence == fence)
			/* mark the job as preempted */
			job->preemption_status |= AMDGPU_IB_PREEMPTED;
	}
	spin_unlock(&sched->job_list_lock);
}
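
/*
 * amdgpu_preempt_ib: writing a ring index to this debugfs file triggers
 * a mid-command-buffer preemption test on that ring.  For example
 * (illustrative path):
 *
 *   echo 0 > /sys/kernel/debug/dri/0/amdgpu_preempt_ib
 */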
static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
{
	int r, resched, length;
	struct amdgpu_ring *ring;
	struct dma_fence **fences = NULL;
	struct amdgpu_device *adev = (struct amdgpu_device *)data;

	if (val >= AMDGPU_MAX_RINGS)
		return -EINVAL;

	ring = adev->rings[val];

	if (!ring || !ring->funcs->preempt_ib || !ring->sched.thread)
		return -EINVAL;

	/* the last preemption failed */
	if (ring->trail_seq != le32_to_cpu(*ring->trail_fence_cpu_addr))
		return -EBUSY;

	length = ring->fence_drv.num_fences_mask + 1;
	fences = kcalloc(length, sizeof(void *), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	/* Avoid accidentally unparking the sched thread during GPU reset */
	mutex_lock(&adev->lock_reset);

	/* stop the scheduler */
	kthread_park(ring->sched.thread);

	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);

	/* preempt the IB */
	r = amdgpu_ring_preempt_ib(ring);
	if (r) {
		DRM_WARN("failed to preempt ring %d\n", ring->idx);
		goto failure;
	}

	amdgpu_fence_process(ring);

	if (atomic_read(&ring->fence_drv.last_seq) !=
	    ring->fence_drv.sync_seq) {
		DRM_INFO("ring %d was preempted\n", ring->idx);

		amdgpu_ib_preempt_mark_partial_job(ring);

		/* swap out the old fences */
		amdgpu_ib_preempt_fences_swap(ring, fences);

		amdgpu_fence_driver_force_completion(ring);

		/* resubmit unfinished jobs */
		amdgpu_ib_preempt_job_recovery(&ring->sched);

		/* wait for the resubmitted jobs to finish */
		amdgpu_fence_wait_empty(ring);

		/* signal the old fences */
		amdgpu_ib_preempt_signal_fences(fences, length);
	}

failure:
	/* restart the scheduler */
	kthread_unpark(ring->sched.thread);

	mutex_unlock(&adev->lock_reset);

	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);

	kfree(fences);

	return 0;
}

static int amdgpu_debugfs_sclk_set(void *data, u64 val)
{
	int ret = 0;
	uint32_t max_freq, min_freq;
	struct amdgpu_device *adev = (struct amdgpu_device *)data;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EINVAL;

	ret = pm_runtime_get_sync(adev->ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK,
					     &min_freq, &max_freq, true);
		if (!ret && (val > max_freq || val < min_freq))
			ret = -EINVAL;
		if (!ret)
			ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK,
						      (uint32_t)val, (uint32_t)val, true);
	} else {
		ret = 0;
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (ret)
		return -EINVAL;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_ib_preempt, NULL,
			amdgpu_debugfs_ib_preempt, "%llu\n");

DEFINE_SIMPLE_ATTRIBUTE(fops_sclk_set, NULL,
			amdgpu_debugfs_sclk_set, "%llu\n");
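
/**
 * amdgpu_debugfs_init - create the amdgpu debugfs entries
 *
 * @adev: the device to attach the entries to
 *
 * Creates the amdgpu_preempt_ib and amdgpu_force_sclk files, then the
 * TTM, power, SA, fence, GEM, register, firmware, display, ring, RAS
 * and autodump entries.  Failures of optional entries are logged but
 * do not abort initialization.
 */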
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
	int r, i;

	adev->debugfs_preempt =
		debugfs_create_file("amdgpu_preempt_ib", 0600,
				    adev->ddev->primary->debugfs_root, adev,
				    &fops_ib_preempt);
	if (!(adev->debugfs_preempt)) {
		DRM_ERROR("unable to create amdgpu_preempt_ib debugfs file\n");
		return -EIO;
	}

	adev->smu.debugfs_sclk =
		debugfs_create_file("amdgpu_force_sclk", 0200,
				    adev->ddev->primary->debugfs_root, adev,
				    &fops_sclk_set);
	if (!(adev->smu.debugfs_sclk)) {
		DRM_ERROR("unable to create amdgpu_force_sclk debugfs file\n");
		return -EIO;
	}

	r = amdgpu_ttm_debugfs_init(adev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}

	r = amdgpu_debugfs_pm_init(adev);
	if (r) {
		DRM_ERROR("Failed to register debugfs file for dpm!\n");
		return r;
	}

	if (amdgpu_debugfs_sa_init(adev))
		dev_err(adev->dev, "failed to register debugfs file for SA\n");

	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	r = amdgpu_debugfs_gem_init(adev);
	if (r)
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_regs_init(adev);
	if (r)
		DRM_ERROR("registering register debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_firmware_init(adev);
	if (r)
		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);

#if defined(CONFIG_DRM_AMD_DC)
	if (amdgpu_device_has_dc_support(adev)) {
		if (dtn_debugfs_init(adev))
			DRM_ERROR("amdgpu: failed to initialize dtn debugfs support.\n");
	}
#endif

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring)
			continue;

		if (amdgpu_debugfs_ring_init(adev, ring))
			DRM_ERROR("Failed to register debugfs file for rings !\n");
	}

	amdgpu_ras_debugfs_create_all(adev);

	amdgpu_debugfs_autodump_init(adev);

	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
					ARRAY_SIZE(amdgpu_debugfs_list));
}

#else
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
	return 0;
}
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
	return 0;
}
#endif