#include "amdgpu.h"
#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu_sched.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"
#include "amdgpu_display.h"
#include "amdgpu_ras.h"
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;
	int i;

	mutex_lock(&mgpu_info.mutex);

	for (i = 0; i < mgpu_info.num_gpu; i++) {
		gpu_instance = &(mgpu_info.gpu_ins[i]);
		if (gpu_instance->adev == adev) {
			mgpu_info.gpu_ins[i] =
				mgpu_info.gpu_ins[mgpu_info.num_gpu - 1];
			mgpu_info.num_gpu--;
			if (adev->flags & AMD_IS_APU)
				mgpu_info.num_apu--;
			else
				mgpu_info.num_dgpu--;
			break;
		}
	}

	mutex_unlock(&mgpu_info.mutex);
}

/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 */
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev == NULL)
		return;

	amdgpu_unregister_gpu_instance(adev);

	if (adev->rmmio == NULL)
		goto done_free;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	if (adev->runpm) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	amdgpu_acpi_fini(adev);

	amdgpu_device_fini(adev);

done_free:
	kfree(adev);
	dev->dev_private = NULL;
}

void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;

	mutex_lock(&mgpu_info.mutex);

	if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
		DRM_ERROR("Cannot register more gpu instance\n");
		mutex_unlock(&mgpu_info.mutex);
		return;
	}

	gpu_instance = &(mgpu_info.gpu_ins[mgpu_info.num_gpu]);
	gpu_instance->adev = adev;
	gpu_instance->mgpu_fan_enabled = 0;

	mgpu_info.num_gpu++;
	if (adev->flags & AMD_IS_APU)
		mgpu_info.num_apu++;
	else
		mgpu_info.num_dgpu++;

	mutex_unlock(&mgpu_info.mutex);
}

/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct amdgpu_device *adev;
	int r, acpi_status;

	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
	if (adev == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)adev;

	if (amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    ((flags & AMD_IS_APU) == 0) &&
	    !pci_is_thunderbolt_attached(dev->pdev))
		flags |= AMD_IS_PX;

	/* amdgpu_device_init should report only fatal error
	 * like memory allocation failure or iomapping failure,
	 * or memory manager initialization failure, it must
	 * properly initialize the GPU MMU.
	 */
	r = amdgpu_device_init(adev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* runtime PM: enabled by default on BOCO platforms unless
	 * explicitly disabled, explicit opt-in on BACO-capable ones
	 */
	if (amdgpu_device_supports_boco(dev) &&
	    (amdgpu_runtime_pm != 0))
		adev->runpm = true;
	else if (amdgpu_device_supports_baco(dev) &&
		 (amdgpu_runtime_pm > 0))
		adev->runpm = true;

	/* Call ACPI methods: require modeset init
	 * but failure is not fatal
	 */
	if (!r) {
		acpi_status = amdgpu_acpi_init(adev);
		if (acpi_status)
			dev_dbg(&dev->pdev->dev,
				"Error during ACPI methods call\n");
	}

	if (adev->runpm) {
		dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

out:
	if (r) {
		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
		if (adev->rmmio && adev->runpm)
			pm_runtime_put_noidle(dev->dev);
		amdgpu_driver_unload_kms(dev);
	}

	return r;
}
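
/*
 * Note on the runtime PM sequence above: pm_runtime_put_autosuspend() drops
 * the initial usage count so the 5 second autosuspend timer can kick in once
 * the device goes idle, while the pm_runtime_get_sync()/pm_runtime_forbid()
 * pair in amdgpu_driver_unload_kms() wakes and pins the device again before
 * teardown.
 */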

static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
				struct drm_amdgpu_query_fw *query_fw,
				struct amdgpu_device *adev)
{
	switch (query_fw->fw_type) {
	case AMDGPU_INFO_FW_VCE:
		fw_info->ver = adev->vce.fw_version;
		fw_info->feature = adev->vce.fb_version;
		break;
	case AMDGPU_INFO_FW_UVD:
		fw_info->ver = adev->uvd.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_VCN:
		fw_info->ver = adev->vcn.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GMC:
		fw_info->ver = adev->gmc.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GFX_ME:
		fw_info->ver = adev->gfx.me_fw_version;
		fw_info->feature = adev->gfx.me_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_PFP:
		fw_info->ver = adev->gfx.pfp_fw_version;
		fw_info->feature = adev->gfx.pfp_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_CE:
		fw_info->ver = adev->gfx.ce_fw_version;
		fw_info->feature = adev->gfx.ce_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC:
		fw_info->ver = adev->gfx.rlc_fw_version;
		fw_info->feature = adev->gfx.rlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
		fw_info->ver = adev->gfx.rlc_srlc_fw_version;
		fw_info->feature = adev->gfx.rlc_srlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
		fw_info->ver = adev->gfx.rlc_srlg_fw_version;
		fw_info->feature = adev->gfx.rlc_srlg_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
		fw_info->ver = adev->gfx.rlc_srls_fw_version;
		fw_info->feature = adev->gfx.rlc_srls_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_MEC:
		if (query_fw->index == 0) {
			fw_info->ver = adev->gfx.mec_fw_version;
			fw_info->feature = adev->gfx.mec_feature_version;
		} else if (query_fw->index == 1) {
			fw_info->ver = adev->gfx.mec2_fw_version;
			fw_info->feature = adev->gfx.mec2_feature_version;
		} else
			return -EINVAL;
		break;
	case AMDGPU_INFO_FW_SMC:
		fw_info->ver = adev->pm.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_TA:
		if (query_fw->index > 1)
			return -EINVAL;
		if (query_fw->index == 0) {
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.ta_xgmi_ucode_version;
		} else {
			fw_info->ver = adev->psp.ta_fw_version;
			fw_info->feature = adev->psp.ta_ras_ucode_version;
		}
		break;
	case AMDGPU_INFO_FW_SDMA:
		if (query_fw->index >= adev->sdma.num_instances)
			return -EINVAL;
		fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
		fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
		break;
	case AMDGPU_INFO_FW_SOS:
		fw_info->ver = adev->psp.sos_fw_version;
		fw_info->feature = adev->psp.sos_feature_version;
		break;
	case AMDGPU_INFO_FW_ASD:
		fw_info->ver = adev->psp.asd_fw_version;
		fw_info->feature = adev->psp.asd_feature_version;
		break;
	case AMDGPU_INFO_FW_DMCU:
		fw_info->ver = adev->dm.dmcu_fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_DMCUB:
		fw_info->ver = adev->dm.dmcub_fw_version;
		fw_info->feature = 0;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
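
/*
 * A minimal userspace sketch of querying a firmware version through the INFO
 * ioctl handled below; "fd" is a hypothetical open render-node file
 * descriptor and the struct layout comes from the drm_amdgpu.h uapi header:
 *
 *	struct drm_amdgpu_info_firmware fw = {};
 *	struct drm_amdgpu_info request = {};
 *
 *	request.return_pointer = (uintptr_t)&fw;
 *	request.return_size = sizeof(fw);
 *	request.query = AMDGPU_INFO_FW_VERSION;
 *	request.query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
 *	request.query_fw.index = 0;		// MEC1; 1 would select MEC2
 *	ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request);
 *	// fw.ver / fw.feature now hold the versions filled in above
 */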

static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
			     struct drm_amdgpu_info *info,
			     struct drm_amdgpu_info_hw_ip *result)
{
	uint32_t ib_start_alignment = 0;
	uint32_t ib_size_alignment = 0;
	enum amd_ip_block_type type;
	unsigned int num_rings = 0;
	unsigned int i, j;

	if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
		return -EINVAL;

	switch (info->query_hw_ip.type) {
	case AMDGPU_HW_IP_GFX:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			if (adev->gfx.gfx_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_COMPUTE:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			if (adev->gfx.compute_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_DMA:
		type = AMD_IP_BLOCK_TYPE_SDMA;
		for (i = 0; i < adev->sdma.num_instances; i++)
			if (adev->sdma.instance[i].ring.sched.ready)
				++num_rings;
		ib_start_alignment = 256;
		ib_size_alignment = 4;
		break;
	case AMDGPU_HW_IP_UVD:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			if (adev->uvd.inst[i].ring.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCE:
		type = AMD_IP_BLOCK_TYPE_VCE;
		for (i = 0; i < adev->vce.num_rings; i++)
			if (adev->vce.ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 4;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_UVD_ENC:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->uvd.num_enc_rings; j++)
				if (adev->uvd.inst[i].ring_enc[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCN_DEC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			/* check the VCN harvest config, not UVD's,
			 * when iterating VCN instances
			 */
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			if (adev->vcn.inst[i].ring_dec.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	case AMDGPU_HW_IP_VCN_ENC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->vcn.num_enc_rings; j++)
				if (adev->vcn.inst[i].ring_enc[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_VCN_JPEG:
		type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
			AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;

		for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
			if (adev->jpeg.harvest_config & (1 << i))
				continue;

			if (adev->jpeg.inst[i].ring_dec.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type &&
		    adev->ip_blocks[i].status.valid)
			break;

	if (i == adev->num_ip_blocks)
		return 0;

	num_rings = min(amdgpu_ctx_num_entities[info->query_hw_ip.type],
			num_rings);

	result->hw_ip_version_major = adev->ip_blocks[i].version->major;
	result->hw_ip_version_minor = adev->ip_blocks[i].version->minor;
	result->capabilities_flags = 0;
	result->available_rings = (1 << num_rings) - 1;
	result->ib_start_alignment = ib_start_alignment;
	result->ib_size_alignment = ib_size_alignment;
	return 0;
}
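
/*
 * A hedged userspace sketch of the corresponding HW IP query; the
 * interpretation of available_rings follows directly from the
 * (1 << num_rings) - 1 computation above:
 *
 *	struct drm_amdgpu_info_hw_ip ip = {};
 *	struct drm_amdgpu_info request = {};
 *
 *	request.return_pointer = (uintptr_t)&ip;
 *	request.return_size = sizeof(ip);
 *	request.query = AMDGPU_INFO_HW_IP_INFO;
 *	request.query_hw_ip.type = AMDGPU_HW_IP_GFX;
 *	request.query_hw_ip.ip_instance = 0;
 *	ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request);
 *	// bit i of ip.available_rings is set iff ring i is ready for IBs
 */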

/*
 * Userspace get information ioctl
 */
/**
 * amdgpu_info_ioctl - answer a device specific request.
 *
 * @dev: drm device pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers.  Examples include: pci device id, pipeline parms, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info *info = data;
	struct amdgpu_mode_info *minfo = &adev->mode_info;
	void __user *out = (void __user *)(uintptr_t)info->return_pointer;
	uint32_t size = info->return_size;
	struct drm_crtc *crtc;
	uint32_t ui32 = 0;
	uint64_t ui64 = 0;
	int i, found;
	int ui32_size = sizeof(ui32);

	if (!info->return_size || !info->return_pointer)
		return -EINVAL;

	switch (info->query) {
	case AMDGPU_INFO_ACCEL_WORKING:
		ui32 = adev->accel_working;
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == info->mode_crtc.id) {
				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

				ui32 = amdgpu_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_HW_IP_INFO: {
		struct drm_amdgpu_info_hw_ip ip = {};
		int ret;

		ret = amdgpu_hw_ip_info(adev, info, &ip);
		if (ret)
			return ret;

		ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
		return ret ? -EFAULT : 0;
	}
	case AMDGPU_INFO_HW_IP_COUNT: {
		enum amd_ip_block_type type;
		uint32_t count = 0;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
		case AMDGPU_HW_IP_VCN_ENC:
			type = AMD_IP_BLOCK_TYPE_VCN;
			break;
		case AMDGPU_HW_IP_VCN_JPEG:
			type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
				AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++)
			if (adev->ip_blocks[i].version->type == type &&
			    adev->ip_blocks[i].status.valid &&
			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
				count++;

		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_TIMESTAMP:
		ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_FW_VERSION: {
		struct drm_amdgpu_info_firmware fw_info;
		int ret;

		/* We only support one instance of each IP block right now. */
		if (info->query_fw.ip_instance != 0)
			return -EINVAL;

		ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
		if (ret)
			return ret;

		return copy_to_user(out, &fw_info,
				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_NUM_BYTES_MOVED:
		ui64 = atomic64_read(&adev->num_bytes_moved);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_EVICTIONS:
		ui64 = atomic64_read(&adev->num_evictions);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VIS_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GTT_USAGE:
		ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GDS_CONFIG: {
		struct drm_amdgpu_info_gds gds_info;

		memset(&gds_info, 0, sizeof(gds_info));
		gds_info.compute_partition_size = adev->gds.gds_size;
		gds_info.gds_total_size = adev->gds.gds_size;
		gds_info.gws_per_compute_partition = adev->gds.gws_size;
		gds_info.oa_per_compute_partition = adev->gds.oa_size;
		return copy_to_user(out, &gds_info,
				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_GTT: {
		struct drm_amdgpu_info_vram_gtt vram_gtt;

		vram_gtt.vram_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size) -
			AMDGPU_VM_RESERVED_VRAM;
		vram_gtt.vram_cpu_accessible_size =
			min(adev->gmc.visible_vram_size -
			    atomic64_read(&adev->visible_pin_size),
			    vram_gtt.vram_size);
		vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
		vram_gtt.gtt_size *= PAGE_SIZE;
		vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
		return copy_to_user(out, &vram_gtt,
				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_MEMORY: {
		struct drm_amdgpu_memory_info mem;

		memset(&mem, 0, sizeof(mem));
		mem.vram.total_heap_size = adev->gmc.real_vram_size;
		mem.vram.usable_heap_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size) -
			AMDGPU_VM_RESERVED_VRAM;
		mem.vram.heap_usage =
			amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;

		mem.cpu_accessible_vram.total_heap_size =
			adev->gmc.visible_vram_size;
		mem.cpu_accessible_vram.usable_heap_size =
			min(adev->gmc.visible_vram_size -
			    atomic64_read(&adev->visible_pin_size),
			    mem.vram.usable_heap_size);
		mem.cpu_accessible_vram.heap_usage =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.cpu_accessible_vram.max_allocation =
			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;

		mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
		mem.gtt.total_heap_size *= PAGE_SIZE;
		mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
			atomic64_read(&adev->gart_pin_size);
		mem.gtt.heap_usage =
			amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;

		return copy_to_user(out, &mem,
				    min((size_t)size, sizeof(mem)))
				    ? -EFAULT : 0;
	}
	case AMDGPU_INFO_READ_MMR_REG: {
		unsigned n, alloc_size;
		uint32_t *regs;
		unsigned se_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
		unsigned sh_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SH_INDEX_MASK;

		/* set full masks if the userspace set all bits
		 * in the bitfields
		 */
		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
			se_num = 0xffffffff;
		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
			sh_num = 0xffffffff;

		if (info->read_mmr_reg.count > 128)
			return -EINVAL;

		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
		if (!regs)
			return -ENOMEM;
		alloc_size = info->read_mmr_reg.count * sizeof(*regs);

		amdgpu_gfx_off_ctrl(adev, false);
		for (i = 0; i < info->read_mmr_reg.count; i++) {
			if (amdgpu_asic_read_register(adev, se_num, sh_num,
						      info->read_mmr_reg.dword_offset + i,
						      &regs[i])) {
				DRM_DEBUG_KMS("unallowed offset %#x\n",
					      info->read_mmr_reg.dword_offset + i);
				kfree(regs);
				amdgpu_gfx_off_ctrl(adev, true);
				return -EFAULT;
			}
		}
		amdgpu_gfx_off_ctrl(adev, true);
		n = copy_to_user(out, regs, min(size, alloc_size));
		kfree(regs);
		return n ? -EFAULT : 0;
	}
	case AMDGPU_INFO_DEV_INFO: {
		struct drm_amdgpu_info_device dev_info = {};
		uint64_t vm_size;

		dev_info.device_id = dev->pdev->device;
		dev_info.chip_rev = adev->rev_id;
		dev_info.external_rev = adev->external_rev_id;
		dev_info.pci_rev = dev->pdev->revision;
		dev_info.family = adev->family;
		dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
		dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
		/* return all clocks in KHz */
		dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
		if (adev->pm.dpm_enabled) {
			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
		} else {
			dev_info.max_engine_clock = adev->clock.default_sclk * 10;
			dev_info.max_memory_clock = adev->clock.default_mclk * 10;
		}
		dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
		dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
			adev->gfx.config.max_shader_engines;
		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
		dev_info._pad = 0;
		dev_info.ids_flags = 0;
		if (adev->flags & AMD_IS_APU)
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
		if (amdgpu_mcbp || amdgpu_sriov_vf(adev))
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;

		vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
		vm_size -= AMDGPU_VA_RESERVED_SIZE;

		/* Older VCE FW versions are buggy and can handle only 40bits */
		if (adev->vce.fw_version &&
		    adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
			vm_size = min(vm_size, 1ULL << 40);

		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
		dev_info.virtual_address_max =
			min(vm_size, AMDGPU_GMC_HOLE_START);

		if (vm_size > AMDGPU_GMC_HOLE_START) {
			dev_info.high_va_offset = AMDGPU_GMC_HOLE_END;
			dev_info.high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
		}
		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
		dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
		dev_info.cu_active_number = adev->gfx.cu_info.number;
		dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
		dev_info.ce_ram_size = adev->gfx.ce_ram_size;
		memcpy(&dev_info.cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
		       sizeof(adev->gfx.cu_info.ao_cu_bitmap));
		memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
		       sizeof(adev->gfx.cu_info.bitmap));
		dev_info.vram_type = adev->gmc.vram_type;
		dev_info.vram_bit_width = adev->gmc.vram_width;
		dev_info.vce_harvest_config = adev->vce.harvest_config;
		dev_info.gc_double_offchip_lds_buf =
			adev->gfx.config.double_offchip_lds_buf;
		dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
		dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
		dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
		dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
		dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
		dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
		dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;

		if (adev->family >= AMDGPU_FAMILY_NV)
			dev_info.pa_sc_tile_steering_override =
				adev->gfx.config.pa_sc_tile_steering_override;

		dev_info.tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;

		return copy_to_user(out, &dev_info,
				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
		unsigned i;
		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
		struct amd_vce_state *vce_state;

		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
			if (vce_state) {
				vce_clk_table.entries[i].sclk = vce_state->sclk;
				vce_clk_table.entries[i].mclk = vce_state->mclk;
				vce_clk_table.entries[i].eclk = vce_state->evclk;
				vce_clk_table.num_valid_entries++;
			}
		}

		return copy_to_user(out, &vce_clk_table,
				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VBIOS: {
		uint32_t bios_size = adev->bios_size;

		switch (info->vbios_info.type) {
		case AMDGPU_INFO_VBIOS_SIZE:
			return copy_to_user(out, &bios_size,
					    min((size_t)size, sizeof(bios_size)))
					    ? -EFAULT : 0;
		case AMDGPU_INFO_VBIOS_IMAGE: {
			uint8_t *bios;
			uint32_t bios_offset = info->vbios_info.offset;

			if (bios_offset >= bios_size)
				return -EINVAL;

			bios = adev->bios + bios_offset;
			return copy_to_user(out, bios,
					    min((size_t)size, (size_t)(bios_size - bios_offset)))
					    ? -EFAULT : 0;
		}
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->vbios_info.type);
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_NUM_HANDLES: {
		struct drm_amdgpu_info_num_handles handle;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_UVD:
			/* Starting Polaris, we support unlimited UVD handles */
			if (adev->asic_type < CHIP_POLARIS10) {
				handle.uvd_max_handles = adev->uvd.max_handles;
				handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);

				return copy_to_user(out, &handle,
					min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
			} else {
				return -ENODATA;
			}

			break;
		default:
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_SENSOR: {
		if (!adev->pm.dpm_enabled)
			return -ENOENT;

		switch (info->sensor_info.type) {
		case AMDGPU_INFO_SENSOR_GFX_SCLK:
			/* get sclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GFX_MCLK:
			/* get mclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GPU_TEMP:
			/* get temperature in millidegrees C */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_TEMP,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_LOAD:
			/* get GPU load */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_LOAD,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
			/* get average GPU power */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_POWER,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 >>= 8;
			break;
		case AMDGPU_INFO_SENSOR_VDDNB:
			/* get VDDNB in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDNB,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_VDDGFX:
			/* get VDDGFX in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDGFX,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
			/* get stable pstate sclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
			/* get stable pstate mclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->sensor_info.type);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_LOST_COUNTER:
		ui32 = atomic_read(&adev->vram_lost_counter);
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_RAS_ENABLED_FEATURES: {
		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
		uint64_t ras_mask;

		if (!ras)
			return -EINVAL;
		ras_mask = (uint64_t)ras->supported << 32 | ras->features;

		return copy_to_user(out, &ras_mask,
				    min_t(u64, size, sizeof(ras_mask))) ?
				    -EFAULT : 0;
	}
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
		return -EINVAL;
	}
	return 0;
}
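
/*
 * For reference, a sketch of how userspace is expected to pack the SE/SH
 * selectors consumed by the AMDGPU_INFO_READ_MMR_REG case above; passing
 * the all-ones index in a field requests a broadcast read:
 *
 *	request.read_mmr_reg.instance =
 *		(se_index << AMDGPU_INFO_MMR_SE_INDEX_SHIFT) |
 *		(sh_index << AMDGPU_INFO_MMR_SH_INDEX_SHIFT);
 *	request.read_mmr_reg.dword_offset = reg_offset;	// in dwords
 *	request.read_mmr_reg.count = 1;			// at most 128
 */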

/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga_switcheroo state after last close (all asics).
 */
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
	drm_fb_helper_lastclose(dev);
	vga_switcheroo_process_delayed_switch();
}

/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, allocate the per-file driver state and initialize its VM.
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv;
	int r, pasid;

	/* Ensure IB tests are run on ring */
	flush_delayed_work(&adev->delayed_init_work);

	/* Refuse further opens once a fatal RAS interrupt has fired */
	if (amdgpu_ras_intr_triggered()) {
		DRM_ERROR("RAS Intr triggered, device disabled!!");
		return -EHWPOISON;
	}

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		return r;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (unlikely(!fpriv)) {
		r = -ENOMEM;
		goto out_suspend;
	}

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}
	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
	if (r)
		goto error_pasid;

	fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
	if (!fpriv->prt_va) {
		r = -ENOMEM;
		goto error_vm;
	}

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;

		r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
					  &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
		if (r)
			goto error_vm;
	}

	mutex_init(&fpriv->bo_list_lock);
	idr_init(&fpriv->bo_list_handles);

	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);

	file_priv->driver_priv = fpriv;
	goto out_suspend;

error_vm:
	amdgpu_vm_fini(adev, &fpriv->vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	kfree(fpriv);

out_suspend:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}

/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down the per-file VM and driver state (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	struct amdgpu_bo *pd;
	unsigned int pasid;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
		amdgpu_uvd_free_handles(adev, file_priv);
	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
		amdgpu_vce_free_handles(adev, file_priv);

	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);

	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
		/* TODO: how to handle reserve failure */
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}

	pasid = fpriv->vm.pasid;
	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);

	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
	amdgpu_vm_fini(adev, &fpriv->vm);

	if (pasid)
		amdgpu_pasid_free_delayed(pd->tbo.base.resv, pasid);
	amdgpu_bo_unref(&pd);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_put(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}

/*
 * VBlank related functions.
 */
/**
 * amdgpu_get_vblank_counter_kms - get frame count
 *
 * @dev: drm dev pointer
 * @pipe: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int vpos, hpos, stat;
	u32 count;

	if (pipe >= adev->mode_info.num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
	if (adev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed, the vblank counter might
		 * increment between the readout and the scanout query.
		 */
		do {
			count = amdgpu_display_vblank_get_counter(adev, pipe);
			/* Ask amdgpu_display_get_crtc_scanoutpos to return
			 * vpos as distance to start of vblank.
			 */
			stat = amdgpu_display_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&adev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != amdgpu_display_vblank_get_counter(adev, pipe));

		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		} else {
			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	} else {
		/* Fallback to use value as is. */
		count = amdgpu_display_vblank_get_counter(adev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}

/**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}

/**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}

const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	/* KMS */
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
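
/*
 * For context, a sketch of how this table is typically referenced from the
 * drm_driver definition (in amdgpu_drv.c in the upstream tree; the field
 * names are the standard struct drm_driver ones):
 *
 *	.ioctls = amdgpu_ioctls_kms,
 *	.num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms),
 */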

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info_firmware fw_info;
	struct drm_amdgpu_query_fw query_fw;
	struct atom_context *ctx = adev->mode_info.atom_context;
	int ret, i;

	/* VCE */
	query_fw.fw_type = AMDGPU_INFO_FW_VCE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* UVD */
	query_fw.fw_type = AMDGPU_INFO_FW_UVD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* GMC */
	query_fw.fw_type = AMDGPU_INFO_FW_GMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* ME */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PFP */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* CE */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST CNTL */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST GPM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST SRM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
	query_fw.index = 0;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC2 */
	if (adev->asic_type == CHIP_KAVERI ||
	    (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
		query_fw.index = 1;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
			   fw_info.feature, fw_info.ver);
	}

	/* PSP SOS */
	query_fw.fw_type = AMDGPU_INFO_FW_SOS;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PSP ASD */
	query_fw.fw_type = AMDGPU_INFO_FW_ASD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	query_fw.fw_type = AMDGPU_INFO_FW_TA;
	for (i = 0; i < 2; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			continue;
		seq_printf(m, "TA %s feature version: %u, firmware version: 0x%08x\n",
			   i ? "RAS" : "XGMI", fw_info.feature, fw_info.ver);
	}

	/* SMC */
	query_fw.fw_type = AMDGPU_INFO_FW_SMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* SDMA */
	query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
			   i, fw_info.feature, fw_info.ver);
	}

	/* VCN */
	query_fw.fw_type = AMDGPU_INFO_FW_VCN;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* DMCU */
	query_fw.fw_type = AMDGPU_INFO_FW_DMCU;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* DMCUB */
	query_fw.fw_type = AMDGPU_INFO_FW_DMCUB;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "DMCUB feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);

	return 0;
}

static const struct drm_info_list amdgpu_firmware_info_list[] = {
	{"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
};
#endif

int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
					ARRAY_SIZE(amdgpu_firmware_info_list));
#else
	return 0;
#endif
}