1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#ifndef KFD_IOCTL_H_INCLUDED
24#define KFD_IOCTL_H_INCLUDED
25
26#include <drm/drm.h>
27#include <linux/ioctl.h>
28
29
30
31
32
33
34
35
36
37
/* Version of the KFD ioctl interface described by this header. */
#define KFD_IOCTL_MAJOR_VERSION 1
#define KFD_IOCTL_MINOR_VERSION 8

/* Argument for AMDKFD_IOC_GET_VERSION: interface version supported by KFD. */
struct kfd_ioctl_get_version_args {
	__u32 major_version;	/* from KFD */
	__u32 minor_version;	/* from KFD */
};
45
46
/* For kfd_ioctl_create_queue_args.queue_type. */
#define KFD_IOC_QUEUE_TYPE_COMPUTE 0x0
#define KFD_IOC_QUEUE_TYPE_SDMA 0x1
#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL 0x2
#define KFD_IOC_QUEUE_TYPE_SDMA_XGMI 0x3

/* Upper bounds for queue_percentage and queue_priority below. */
#define KFD_MAX_QUEUE_PERCENTAGE 100
#define KFD_MAX_QUEUE_PRIORITY 15

/* Argument for AMDKFD_IOC_CREATE_QUEUE. */
struct kfd_ioctl_create_queue_args {
	__u64 ring_base_address;	/* to KFD */
	__u64 write_pointer_address;	/* from KFD */
	__u64 read_pointer_address;	/* from KFD */
	__u64 doorbell_offset;	/* from KFD */

	__u32 ring_size;		/* to KFD */
	__u32 gpu_id;		/* to KFD */
	__u32 queue_type;		/* to KFD; KFD_IOC_QUEUE_TYPE_* */
	__u32 queue_percentage;	/* to KFD; <= KFD_MAX_QUEUE_PERCENTAGE */
	__u32 queue_priority;	/* to KFD; <= KFD_MAX_QUEUE_PRIORITY */
	__u32 queue_id;		/* from KFD */

	__u64 eop_buffer_address;	/* to KFD */
	__u64 eop_buffer_size;	/* to KFD */
	__u64 ctx_save_restore_address; /* to KFD */
	__u32 ctx_save_restore_size;	/* to KFD */
	__u32 ctl_stack_size;		/* to KFD */
};
74
/* Argument for AMDKFD_IOC_DESTROY_QUEUE. */
struct kfd_ioctl_destroy_queue_args {
	__u32 queue_id;		/* to KFD */
	__u32 pad;		/* explicit padding; keep struct size 64-bit aligned */
};
79
/* Argument for AMDKFD_IOC_UPDATE_QUEUE. */
struct kfd_ioctl_update_queue_args {
	__u64 ring_base_address;	/* to KFD */

	__u32 queue_id;		/* to KFD */
	__u32 ring_size;		/* to KFD */
	__u32 queue_percentage;	/* to KFD */
	__u32 queue_priority;	/* to KFD */
};
88
/* Argument for AMDKFD_IOC_SET_CU_MASK. */
struct kfd_ioctl_set_cu_mask_args {
	__u32 queue_id;		/* to KFD */
	__u32 num_cu_mask;		/* to KFD; number of bits in the mask */
	__u64 cu_mask_ptr;		/* to KFD; user pointer to the CU bitmask */
};
94
/* Argument for AMDKFD_IOC_GET_QUEUE_WAVE_STATE. */
struct kfd_ioctl_get_queue_wave_state_args {
	__u64 ctl_stack_address;	/* to KFD */
	__u32 ctl_stack_used_size;	/* from KFD */
	__u32 save_area_used_size;	/* from KFD */
	__u32 queue_id;			/* to KFD */
	__u32 pad;
};
102
103
/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy. */
#define KFD_IOC_CACHE_POLICY_COHERENT 0
#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1

/* Argument for AMDKFD_IOC_SET_MEMORY_POLICY. */
struct kfd_ioctl_set_memory_policy_args {
	__u64 alternate_aperture_base;	/* to KFD */
	__u64 alternate_aperture_size;	/* to KFD */

	__u32 gpu_id;			/* to KFD */
	__u32 default_policy;		/* to KFD; KFD_IOC_CACHE_POLICY_* */
	__u32 alternate_policy;		/* to KFD; KFD_IOC_CACHE_POLICY_* */
	__u32 pad;
};
116
117
118
119
120
121
122
123
/* Argument for AMDKFD_IOC_GET_CLOCK_COUNTERS. */
struct kfd_ioctl_get_clock_counters_args {
	__u64 gpu_clock_counter;	/* from KFD */
	__u64 cpu_clock_counter;	/* from KFD */
	__u64 system_clock_counter;	/* from KFD */
	__u64 system_clock_freq;	/* from KFD */

	__u32 gpu_id;		/* to KFD */
	__u32 pad;
};
133
/* Per-GPU aperture ranges, reported by the get-process-apertures ioctls. */
struct kfd_process_device_apertures {
	__u64 lds_base;		/* from KFD */
	__u64 lds_limit;		/* from KFD */
	__u64 scratch_base;		/* from KFD */
	__u64 scratch_limit;		/* from KFD */
	__u64 gpuvm_base;		/* from KFD */
	__u64 gpuvm_limit;		/* from KFD */
	__u32 gpu_id;		/* from KFD */
	__u32 pad;
};
144
145
146
147
148
149
/*
 * Argument for the legacy AMDKFD_IOC_GET_PROCESS_APERTURES, limited to a
 * fixed number of GPUs. Use AMDKFD_IOC_GET_PROCESS_APERTURES_NEW for more.
 */
#define NUM_OF_SUPPORTED_GPUS 7
struct kfd_ioctl_get_process_apertures_args {
	struct kfd_process_device_apertures
			process_apertures[NUM_OF_SUPPORTED_GPUS];/* from KFD */

	/* from KFD: number of valid entries in process_apertures */
	__u32 num_of_nodes;
	__u32 pad;
};
159
/* Argument for AMDKFD_IOC_GET_PROCESS_APERTURES_NEW (no GPU-count limit). */
struct kfd_ioctl_get_process_apertures_new_args {
	/* User allocated. Pointer to struct kfd_process_device_apertures
	 * filled in by Kernel.
	 */
	__u64 kfd_process_device_apertures_ptr;
	/* to KFD - indicates amount of memory present in
	 *  kfd_process_device_apertures_ptr
	 * from KFD - number of entries filled by KFD
	 */
	__u32 num_of_nodes;
	__u32 pad;
};
172
/* Limits for the deprecated debugger ioctls below. */
#define MAX_ALLOWED_NUM_POINTS 100
#define MAX_ALLOWED_AW_BUFF_SIZE 4096
#define MAX_ALLOWED_WAC_BUFF_SIZE 128

/* Argument for AMDKFD_IOC_DBG_REGISTER_DEPRECATED. */
struct kfd_ioctl_dbg_register_args {
	__u32 gpu_id;		/* to KFD */
	__u32 pad;
};
181
/* Argument for AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED. */
struct kfd_ioctl_dbg_unregister_args {
	__u32 gpu_id;		/* to KFD */
	__u32 pad;
};
186
/* Argument for AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED. */
struct kfd_ioctl_dbg_address_watch_args {
	__u64 content_ptr;		/* to KFD; user pointer to watch data */
	__u32 gpu_id;		/* to KFD */
	__u32 buf_size_in_bytes;	/* to KFD; <= MAX_ALLOWED_AW_BUFF_SIZE */
};
192
/* Argument for AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED. */
struct kfd_ioctl_dbg_wave_control_args {
	__u64 content_ptr;		/* to KFD; user pointer to control data */
	__u32 gpu_id;		/* to KFD */
	__u32 buf_size_in_bytes;	/* to KFD; <= MAX_ALLOWED_WAC_BUFF_SIZE */
};
198
/* Sentinel for an invalid file descriptor in ioctl arguments. */
#define KFD_INVALID_FD 0xffffffff

/* Event types for kfd_ioctl_create_event_args.event_type. */
#define KFD_IOC_EVENT_SIGNAL 0
#define KFD_IOC_EVENT_NODECHANGE 1
#define KFD_IOC_EVENT_DEVICESTATECHANGE 2
#define KFD_IOC_EVENT_HW_EXCEPTION 3
#define KFD_IOC_EVENT_SYSTEM_EVENT 4
#define KFD_IOC_EVENT_DEBUG_EVENT 5
#define KFD_IOC_EVENT_PROFILE_EVENT 6
#define KFD_IOC_EVENT_QUEUE_EVENT 7
#define KFD_IOC_EVENT_MEMORY 8

/* Values for kfd_ioctl_wait_events_args.wait_result. */
#define KFD_IOC_WAIT_RESULT_COMPLETE 0
#define KFD_IOC_WAIT_RESULT_TIMEOUT 1
#define KFD_IOC_WAIT_RESULT_FAIL 2

/* Maximum number of signal events per process. */
#define KFD_SIGNAL_EVENT_LIMIT 4096

/* For kfd_hsa_hw_exception_data.reset_type. */
#define KFD_HW_EXCEPTION_WHOLE_GPU_RESET 0
#define KFD_HW_EXCEPTION_PER_ENGINE_RESET 1

/* For kfd_hsa_hw_exception_data.reset_cause. */
#define KFD_HW_EXCEPTION_GPU_HANG 0
#define KFD_HW_EXCEPTION_ECC 1

/* For kfd_hsa_memory_exception_data.ErrorType. */
#define KFD_MEM_ERR_NO_RAS 0
#define KFD_MEM_ERR_SRAM_ECC 1
#define KFD_MEM_ERR_POISON_CONSUMED 2
#define KFD_MEM_ERR_GPU_HANG 3
231
/* Argument for AMDKFD_IOC_CREATE_EVENT. */
struct kfd_ioctl_create_event_args {
	__u64 event_page_offset;	/* from KFD */
	__u32 event_trigger_data;	/* from KFD - signal events only */
	__u32 event_type;		/* to KFD; KFD_IOC_EVENT_* */
	__u32 auto_reset;		/* to KFD */
	__u32 node_id;		/* to KFD - only valid for certain event types */

	__u32 event_id;		/* from KFD */
	__u32 event_slot_index;	/* from KFD */
};
242
/* Argument for AMDKFD_IOC_DESTROY_EVENT. */
struct kfd_ioctl_destroy_event_args {
	__u32 event_id;		/* to KFD */
	__u32 pad;
};
247
/* Argument for AMDKFD_IOC_SET_EVENT. */
struct kfd_ioctl_set_event_args {
	__u32 event_id;		/* to KFD */
	__u32 pad;
};
252
/* Argument for AMDKFD_IOC_RESET_EVENT. */
struct kfd_ioctl_reset_event_args {
	__u32 event_id;		/* to KFD */
	__u32 pad;
};
257
/* Fault-cause flags for a memory exception event. */
struct kfd_memory_exception_failure {
	__u32 NotPresent;	/* Page not present or supervisor privilege */
	__u32 ReadOnly;	/* Write access to a read-only page */
	__u32 NoExecute;	/* Execute access to a page marked NX */
	__u32 imprecise;	/* Can't determine the exact fault address */
};
264
265
/* Memory exception data, delivered via kfd_event_data. */
struct kfd_hsa_memory_exception_data {
	struct kfd_memory_exception_failure failure;
	__u64 va;		/* faulting virtual address */
	__u32 gpu_id;
	__u32 ErrorType; /* 0 = no RAS error (KFD_MEM_ERR_NO_RAS),
			  * 1 = SRAM ECC (KFD_MEM_ERR_SRAM_ECC),
			  * 2 = poison consumed (KFD_MEM_ERR_POISON_CONSUMED),
			  * 3 = GPU hang (KFD_MEM_ERR_GPU_HANG),
			  * all other values reserved
			  */
};
277
278
/* Hardware exception data, delivered via kfd_event_data. */
struct kfd_hsa_hw_exception_data {
	__u32 reset_type;	/* KFD_HW_EXCEPTION_WHOLE_GPU_RESET or
				 * KFD_HW_EXCEPTION_PER_ENGINE_RESET */
	__u32 reset_cause;	/* KFD_HW_EXCEPTION_GPU_HANG or
				 * KFD_HW_EXCEPTION_ECC */
	__u32 memory_lost;	/* non-zero if memory contents were lost */
	__u32 gpu_id;
};
285
286
/* Element of the array pointed to by kfd_ioctl_wait_events_args.events_ptr. */
struct kfd_event_data {
	union {
		struct kfd_hsa_memory_exception_data memory_exception_data;
		struct kfd_hsa_hw_exception_data hw_exception_data;
	};				/* from KFD */
	__u64 kfd_event_data_ext;	/* pointer to an extension structure
					 * for future exception types
					 */
	__u32 event_id;		/* to KFD */
	__u32 pad;
};
297
/* Argument for AMDKFD_IOC_WAIT_EVENTS. */
struct kfd_ioctl_wait_events_args {
	__u64 events_ptr;		/* to KFD; struct kfd_event_data array */

	__u32 num_events;		/* to KFD */
	__u32 wait_for_all;		/* to KFD; wait for all vs. any event */
	__u32 timeout;		/* to KFD */
	__u32 wait_result;		/* from KFD; KFD_IOC_WAIT_RESULT_* */
};
306
/* Argument for AMDKFD_IOC_SET_SCRATCH_BACKING_VA. */
struct kfd_ioctl_set_scratch_backing_va_args {
	__u64 va_addr;	/* to KFD */
	__u32 gpu_id;	/* to KFD */
	__u32 pad;
};
312
/* Argument for AMDKFD_IOC_GET_TILE_CONFIG. */
struct kfd_ioctl_get_tile_config_args {
	/* to KFD: pointer to tile array */
	__u64 tile_config_ptr;
	/* to KFD: pointer to macro tile array */
	__u64 macro_tile_config_ptr;
	/* to KFD: array size allocated by user mode
	 * from KFD: array size filled by kernel
	 */
	__u32 num_tile_configs;
	/* to KFD: array size allocated by user mode
	 * from KFD: array size filled by kernel
	 */
	__u32 num_macro_tile_configs;

	__u32 gpu_id;		/* to KFD */
	__u32 gb_addr_config;	/* from KFD */
	__u32 num_banks;		/* from KFD */
	__u32 num_ranks;		/* from KFD */
	/* struct size can be extended later if needed
	 * without breaking ABI compatibility
	 */
};
335
/* Argument for AMDKFD_IOC_SET_TRAP_HANDLER. */
struct kfd_ioctl_set_trap_handler_args {
	__u64 tba_addr;		/* to KFD; trap base address */
	__u64 tma_addr;		/* to KFD; trap memory address */
	__u32 gpu_id;		/* to KFD */
	__u32 pad;
};
342
/* Argument for AMDKFD_IOC_ACQUIRE_VM: take over the VM of a DRM render node. */
struct kfd_ioctl_acquire_vm_args {
	__u32 drm_fd;	/* to KFD; open render-node file descriptor */
	__u32 gpu_id;	/* to KFD */
};
347
348
/* Allocation flags: memory types (for kfd_ioctl_alloc_memory_of_gpu_args.flags)
 *
 * Note: unsigned literals are used so that shifts into bit 31 are well
 * defined; (1 << 31) on a 32-bit int is undefined behavior in C99/C11.
 * The macro values are unchanged.
 */
#define KFD_IOC_ALLOC_MEM_FLAGS_VRAM (1U << 0)
#define KFD_IOC_ALLOC_MEM_FLAGS_GTT (1U << 1)
#define KFD_IOC_ALLOC_MEM_FLAGS_USERPTR (1U << 2)
#define KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL (1U << 3)
#define KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP (1U << 4)

/* Allocation flags: attributes */
#define KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE (1U << 31)
#define KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE (1U << 30)
#define KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC (1U << 29)
#define KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE (1U << 28)
#define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1U << 27)
#define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT (1U << 26)
#define KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED (1U << 25)
362
363
364
365
366
367
368
369
370
371
372
373
374
/* Argument for AMDKFD_IOC_ALLOC_MEMORY_OF_GPU.
 *
 * @va_addr:     virtual address at which the memory will later be mapped;
 *               all mappings on all GPUs use this address
 * @size:        size in bytes
 * @handle:      buffer handle returned to user mode; used to refer to this
 *               allocation for mapping, unmapping and freeing
 * @mmap_offset: offset for CPU-mapping the allocation via the render node;
 *               for userptrs this is overloaded to carry the CPU address
 * @gpu_id:      device identifier
 * @flags:       memory type and attributes (KFD_IOC_ALLOC_MEM_FLAGS_*)
 */
struct kfd_ioctl_alloc_memory_of_gpu_args {
	__u64 va_addr;		/* to KFD */
	__u64 size;		/* to KFD */
	__u64 handle;		/* from KFD */
	__u64 mmap_offset;	/* to KFD (userptr), from KFD (mmap offset) */
	__u32 gpu_id;		/* to KFD */
	__u32 flags;
};
383
384
385
386
387
/* Argument for AMDKFD_IOC_FREE_MEMORY_OF_GPU.
 *
 * @handle: memory handle returned by the alloc ioctl
 */
struct kfd_ioctl_free_memory_of_gpu_args {
	__u64 handle;		/* to KFD */
};
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
/* Argument for AMDKFD_IOC_MAP_MEMORY_TO_GPU: map memory to one or more GPUs.
 *
 * @handle:               memory handle returned by the alloc ioctl
 * @device_ids_array_ptr: user pointer to an array of gpu_ids (one __u32 each)
 * @n_devices:            number of devices in the array
 * @n_success:            number of devices mapped successfully so far;
 *                        lets a failed call be retried from where it stopped
 */
struct kfd_ioctl_map_memory_to_gpu_args {
	__u64 handle;			/* to KFD */
	__u64 device_ids_array_ptr;	/* to KFD */
	__u32 n_devices;		/* to KFD */
	__u32 n_success;		/* to/from KFD */
};
413
414
415
416
417
/* Argument for AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU; fields mirror the map ioctl. */
struct kfd_ioctl_unmap_memory_from_gpu_args {
	__u64 handle;			/* to KFD */
	__u64 device_ids_array_ptr;	/* to KFD */
	__u32 n_devices;		/* to KFD */
	__u32 n_success;		/* to/from KFD */
};
424
425
426
427
428
429
430
431
/* Argument for AMDKFD_IOC_ALLOC_QUEUE_GWS: allocate global wave sync
 * resources for a specific queue.
 *
 * @queue_id:  queue the GWS is allocated for
 * @num_gws:   how many GWS to allocate
 * @first_gws: index of the first GWS allocated (from KFD)
 */
struct kfd_ioctl_alloc_queue_gws_args {
	__u32 queue_id;		/* to KFD */
	__u32 num_gws;		/* to KFD */
	__u32 first_gws;	/* from KFD */
	__u32 pad;
};
438
/* Argument for AMDKFD_IOC_GET_DMABUF_INFO. */
struct kfd_ioctl_get_dmabuf_info_args {
	__u64 size;		/* from KFD */
	__u64 metadata_ptr;	/* to KFD */
	__u32 metadata_size;	/* to KFD (space allocated by user)
				 * from KFD (actual metadata size)
				 */
	__u32 gpu_id;	/* from KFD */
	__u32 flags;		/* from KFD (KFD_IOC_ALLOC_MEM_FLAGS) */
	__u32 dmabuf_fd;	/* to KFD */
};
449
/* Argument for AMDKFD_IOC_IMPORT_DMABUF. */
struct kfd_ioctl_import_dmabuf_args {
	__u64 va_addr;	/* to KFD */
	__u64 handle;	/* from KFD */
	__u32 gpu_id;	/* to KFD */
	__u32 dmabuf_fd;	/* to KFD */
};
456
457
458
459
/* System management interface (SMI) event types. */
enum kfd_smi_event {
	KFD_SMI_EVENT_NONE = 0, /* not used */
	KFD_SMI_EVENT_VMFAULT = 1, /* event start counting at 1 */
	KFD_SMI_EVENT_THERMAL_THROTTLE = 2,
	KFD_SMI_EVENT_GPU_PRE_RESET = 3,
	KFD_SMI_EVENT_GPU_POST_RESET = 4,
};
467
/* Bit mask for an SMI event index; indices start at 1 (see enum above). */
#define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1))
/* Maximum size of one SMI event message, including the terminator. */
#define KFD_SMI_EVENT_MSG_SIZE	96

/* Argument for AMDKFD_IOC_SMI_EVENTS. */
struct kfd_ioctl_smi_events_args {
	__u32 gpuid;	/* to KFD */
	__u32 anon_fd;	/* from KFD; fd to read SMI events from */
};
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
/* CRIU (checkpoint/restore) operation, for kfd_ioctl_criu_args.op. */
enum kfd_criu_op {
	KFD_CRIU_OP_PROCESS_INFO,	/* query sizes needed for checkpoint */
	KFD_CRIU_OP_CHECKPOINT,	/* save process state */
	KFD_CRIU_OP_UNPAUSE,		/* resume after a checkpoint */
	KFD_CRIU_OP_RESTORE,		/* restore saved state */
	KFD_CRIU_OP_RESUME,		/* resume execution after restore */
};
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
/* Argument for AMDKFD_IOC_CRIU_OP.
 *
 * @devices:        user pointer to an array of kfd_criu_device_bucket
 * @bos:            user pointer to an array of kfd_criu_bo_bucket
 * @priv_data:      user pointer to an opaque private-data blob
 * @priv_data_size: size in bytes of @priv_data
 * @num_devices:    number of entries in @devices
 * @num_bos:        number of entries in @bos
 * @num_objects:    number of other objects in the private data
 * @pid:            process id of the task being checkpointed/restored
 * @op:             operation to perform (enum kfd_criu_op)
 */
struct kfd_ioctl_criu_args {
	__u64 devices;		/* to/from KFD */
	__u64 bos;		/* to/from KFD */
	__u64 priv_data;	/* to/from KFD */
	__u64 priv_data_size;	/* to/from KFD */
	__u32 num_devices;	/* to/from KFD */
	__u32 num_bos;		/* to/from KFD */
	__u32 num_objects;	/* to/from KFD */
	__u32 pid;		/* to/from KFD */
	__u32 op;		/* to KFD; enum kfd_criu_op */
};
530
/* One GPU entry in the CRIU device array (kfd_ioctl_criu_args.devices). */
struct kfd_criu_device_bucket {
	__u32 user_gpu_id;	/* gpu_id as seen by the checkpointed process */
	__u32 actual_gpu_id;	/* gpu_id on the system being restored onto */
	__u32 drm_fd;		/* render-node fd for the device */
	__u32 pad;
};
537
/* One buffer object entry in the CRIU BO array (kfd_ioctl_criu_args.bos). */
struct kfd_criu_bo_bucket {
	__u64 addr;		/* virtual address of the BO */
	__u64 size;		/* size in bytes */
	__u64 offset;		/* mmap offset at checkpoint time */
	__u64 restored_offset;	/* mmap offset after restore */
	__u32 gpu_id;		/* device the BO belongs to */
	__u32 alloc_flags;	/* KFD_IOC_ALLOC_MEM_FLAGS_* of the BO */
	__u32 dmabuf_fd;	/* dma-buf fd used to transfer contents */
	__u32 pad;
};
548
549
550
551
552
553
/* Register offsets inside the remapped MMIO page
 * (for KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP allocations).
 */
enum kfd_mmio_remap {
	KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL = 0,
	KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL = 4,
};
558
559
/* Guarantee host access to memory */
#define KFD_IOCTL_SVM_FLAG_HOST_ACCESS 0x00000001
/* Fine grained coherency between all devices with access */
#define KFD_IOCTL_SVM_FLAG_COHERENT 0x00000002
/* Use any GPU in same hive as preferred device */
#define KFD_IOCTL_SVM_FLAG_HIVE_LOCAL 0x00000004
/* GPUs only read, allows replication */
#define KFD_IOCTL_SVM_FLAG_GPU_RO 0x00000008
/* Allow execution on GPU */
#define KFD_IOCTL_SVM_FLAG_GPU_EXEC 0x00000010
/* GPUs mostly read; may allow similar optimizations as RO, but writes fault */
#define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY 0x00000020
571
572
573
574
575
576
577
/**
 * kfd_ioctl_svm_op - SVM ioctl operations
 *
 * @KFD_IOCTL_SVM_OP_SET_ATTR: Modify one or more attributes
 * @KFD_IOCTL_SVM_OP_GET_ATTR: Query one or more attributes
 */
enum kfd_ioctl_svm_op {
	KFD_IOCTL_SVM_OP_SET_ATTR,
	KFD_IOCTL_SVM_OP_GET_ATTR
};
582
583
584
585
586
587
588
/**
 * kfd_ioctl_svm_location - Enum for preferred and prefetch locations
 *
 * 0 for system memory, IDs of GPUs are used otherwise; UNDEFINED means
 * "not set" or "no preference".
 */
enum kfd_ioctl_svm_location {
	KFD_IOCTL_SVM_LOCATION_SYSMEM = 0,
	KFD_IOCTL_SVM_LOCATION_UNDEFINED = 0xffffffff
};
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
/**
 * kfd_ioctl_svm_attr_type - SVM attribute types
 *
 * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: gpuid of preferred location, or
 *                                    KFD_IOCTL_SVM_LOCATION_* for sysmem/none
 * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:  gpuid of prefetch location, or
 *                                    KFD_IOCTL_SVM_LOCATION_SYSMEM
 * @KFD_IOCTL_SVM_ATTR_ACCESS:        gpuid that gains access
 * @KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE: gpuid that gains in-place access
 * @KFD_IOCTL_SVM_ATTR_NO_ACCESS:     gpuid that loses access
 * @KFD_IOCTL_SVM_ATTR_SET_FLAGS:     bitmask of KFD_IOCTL_SVM_FLAG_* to set
 * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS:     bitmask of KFD_IOCTL_SVM_FLAG_* to clear
 * @KFD_IOCTL_SVM_ATTR_GRANULARITY:   migration granularity
 *                                    (log2 of number of pages)
 */
enum kfd_ioctl_svm_attr_type {
	KFD_IOCTL_SVM_ATTR_PREFERRED_LOC,
	KFD_IOCTL_SVM_ATTR_PREFETCH_LOC,
	KFD_IOCTL_SVM_ATTR_ACCESS,
	KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE,
	KFD_IOCTL_SVM_ATTR_NO_ACCESS,
	KFD_IOCTL_SVM_ATTR_SET_FLAGS,
	KFD_IOCTL_SVM_ATTR_CLR_FLAGS,
	KFD_IOCTL_SVM_ATTR_GRANULARITY
};
622
623
624
625
626
627
628
629
630
/**
 * kfd_ioctl_svm_attribute - One attribute/value pair in kfd_ioctl_svm_args
 *
 * @type:  attribute type (enum kfd_ioctl_svm_attr_type)
 * @value: attribute value; meaning depends on @type
 */
struct kfd_ioctl_svm_attribute {
	__u32 type;
	__u32 value;
};
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
/**
 * kfd_ioctl_svm_args - Arguments for AMDKFD_IOC_SVM
 *
 * @start_addr: start of the address range the operation applies to
 * @size:       size of the range in bytes
 * @op:         operation (enum kfd_ioctl_svm_op)
 * @nattr:      number of entries in the trailing @attrs array
 * @attrs:      flexible array of attributes to set or query
 */
struct kfd_ioctl_svm_args {
	__u64 start_addr;
	__u64 size;
	__u32 op;
	__u32 nattr;
	/* Variable length array pointing to attribute type/value pairs */
	struct kfd_ioctl_svm_attribute attrs[];
};
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
/**
 * kfd_ioctl_set_xnack_mode_args - Argument for AMDKFD_IOC_SET_XNACK_MODE
 *
 * @xnack_enabled: [in/out] whether recoverable page faults (XNACK) are
 *                 enabled for this process. 0 disables, positive enables,
 *                 negative leaves unchanged; on return it reports the
 *                 process's current XNACK mode.
 */
struct kfd_ioctl_set_xnack_mode_args {
	__s32 xnack_enabled;
};
720
/*
 * Ioctl command numbers. All KFD ioctls use the 'K' magic. Commands occupy
 * the contiguous range [AMDKFD_COMMAND_START, AMDKFD_COMMAND_END); numbers
 * are part of the ABI and must never be reused or reordered.
 */
#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOW(nr, type) _IOW(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOWR(nr, type) _IOWR(AMDKFD_IOCTL_BASE, nr, type)

#define AMDKFD_IOC_GET_VERSION \
 AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)

#define AMDKFD_IOC_CREATE_QUEUE \
 AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)

#define AMDKFD_IOC_DESTROY_QUEUE \
 AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)

#define AMDKFD_IOC_SET_MEMORY_POLICY \
 AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)

#define AMDKFD_IOC_GET_CLOCK_COUNTERS \
 AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)

#define AMDKFD_IOC_GET_PROCESS_APERTURES \
 AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)

#define AMDKFD_IOC_UPDATE_QUEUE \
 AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)

#define AMDKFD_IOC_CREATE_EVENT \
 AMDKFD_IOWR(0x08, struct kfd_ioctl_create_event_args)

#define AMDKFD_IOC_DESTROY_EVENT \
 AMDKFD_IOW(0x09, struct kfd_ioctl_destroy_event_args)

#define AMDKFD_IOC_SET_EVENT \
 AMDKFD_IOW(0x0A, struct kfd_ioctl_set_event_args)

#define AMDKFD_IOC_RESET_EVENT \
 AMDKFD_IOW(0x0B, struct kfd_ioctl_reset_event_args)

#define AMDKFD_IOC_WAIT_EVENTS \
 AMDKFD_IOWR(0x0C, struct kfd_ioctl_wait_events_args)

/* 0x0D-0x10 are kept for the deprecated debugger interface; the numbers
 * remain reserved so they are never recycled for new commands.
 */
#define AMDKFD_IOC_DBG_REGISTER_DEPRECATED \
 AMDKFD_IOW(0x0D, struct kfd_ioctl_dbg_register_args)

#define AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED \
 AMDKFD_IOW(0x0E, struct kfd_ioctl_dbg_unregister_args)

#define AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED \
 AMDKFD_IOW(0x0F, struct kfd_ioctl_dbg_address_watch_args)

#define AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED \
 AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)

#define AMDKFD_IOC_SET_SCRATCH_BACKING_VA \
 AMDKFD_IOWR(0x11, struct kfd_ioctl_set_scratch_backing_va_args)

#define AMDKFD_IOC_GET_TILE_CONFIG \
 AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args)

#define AMDKFD_IOC_SET_TRAP_HANDLER \
 AMDKFD_IOW(0x13, struct kfd_ioctl_set_trap_handler_args)

#define AMDKFD_IOC_GET_PROCESS_APERTURES_NEW \
 AMDKFD_IOWR(0x14, \
 struct kfd_ioctl_get_process_apertures_new_args)

#define AMDKFD_IOC_ACQUIRE_VM \
 AMDKFD_IOW(0x15, struct kfd_ioctl_acquire_vm_args)

#define AMDKFD_IOC_ALLOC_MEMORY_OF_GPU \
 AMDKFD_IOWR(0x16, struct kfd_ioctl_alloc_memory_of_gpu_args)

#define AMDKFD_IOC_FREE_MEMORY_OF_GPU \
 AMDKFD_IOW(0x17, struct kfd_ioctl_free_memory_of_gpu_args)

#define AMDKFD_IOC_MAP_MEMORY_TO_GPU \
 AMDKFD_IOWR(0x18, struct kfd_ioctl_map_memory_to_gpu_args)

#define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU \
 AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)

#define AMDKFD_IOC_SET_CU_MASK \
 AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)

#define AMDKFD_IOC_GET_QUEUE_WAVE_STATE \
 AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)

#define AMDKFD_IOC_GET_DMABUF_INFO \
 AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args)

#define AMDKFD_IOC_IMPORT_DMABUF \
 AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)

#define AMDKFD_IOC_ALLOC_QUEUE_GWS \
 AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args)

#define AMDKFD_IOC_SMI_EVENTS \
 AMDKFD_IOWR(0x1F, struct kfd_ioctl_smi_events_args)

#define AMDKFD_IOC_SVM AMDKFD_IOWR(0x20, struct kfd_ioctl_svm_args)

#define AMDKFD_IOC_SET_XNACK_MODE \
 AMDKFD_IOWR(0x21, struct kfd_ioctl_set_xnack_mode_args)

#define AMDKFD_IOC_CRIU_OP \
 AMDKFD_IOWR(0x22, struct kfd_ioctl_criu_args)

/* First valid command number and one past the last valid command number. */
#define AMDKFD_COMMAND_START 0x01
#define AMDKFD_COMMAND_END 0x23
831
832#endif
833