/*
 * AMD KFD (Kernel Fusion Driver) userspace ioctl interface.
 *
 * NOTE(review): the original copyright/license comment block was lost in
 * extraction (only bare line-number residue remained here) — restore it
 * from the upstream file before shipping.
 */
23#ifndef KFD_IOCTL_H_INCLUDED
24#define KFD_IOCTL_H_INCLUDED
25
26#include <drm/drm.h>
27#include <linux/ioctl.h>
28
29
30
31
32
33
34
/* Interface version reported by AMDKFD_IOC_GET_VERSION. */
#define KFD_IOCTL_MAJOR_VERSION 1
#define KFD_IOCTL_MINOR_VERSION 5

/* AMDKFD_IOC_GET_VERSION is _IOR, so both fields are filled by the kernel. */
struct kfd_ioctl_get_version_args {
 __u32 major_version;	/* from KFD */
 __u32 minor_version;	/* from KFD */
};
42
43
/* Queue types for kfd_ioctl_create_queue_args.queue_type */
#define KFD_IOC_QUEUE_TYPE_COMPUTE 0x0
#define KFD_IOC_QUEUE_TYPE_SDMA 0x1
#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL 0x2
#define KFD_IOC_QUEUE_TYPE_SDMA_XGMI 0x3

/* Upper bounds for queue_percentage and queue_priority below */
#define KFD_MAX_QUEUE_PERCENTAGE 100
#define KFD_MAX_QUEUE_PRIORITY 15

/* Arguments for AMDKFD_IOC_CREATE_QUEUE (_IOWR). */
struct kfd_ioctl_create_queue_args {
 __u64 ring_base_address;	/* to KFD */
 __u64 write_pointer_address;	/* from KFD */
 __u64 read_pointer_address;	/* from KFD */
 __u64 doorbell_offset;	/* from KFD */

 __u32 ring_size;		/* to KFD */
 __u32 gpu_id;		/* to KFD */
 __u32 queue_type;		/* to KFD: KFD_IOC_QUEUE_TYPE_* */
 __u32 queue_percentage;	/* to KFD: <= KFD_MAX_QUEUE_PERCENTAGE */
 __u32 queue_priority;	/* to KFD: <= KFD_MAX_QUEUE_PRIORITY */
 __u32 queue_id;		/* from KFD */

 __u64 eop_buffer_address;	/* to KFD */
 __u64 eop_buffer_size;	/* to KFD */
 __u64 ctx_save_restore_address; /* to KFD */
 __u32 ctx_save_restore_size;	/* to KFD */
 __u32 ctl_stack_size;		/* to KFD */
};
71
/* Arguments for AMDKFD_IOC_DESTROY_QUEUE. */
struct kfd_ioctl_destroy_queue_args {
 __u32 queue_id;	/* to KFD */
 __u32 pad;
};
76
/* Arguments for AMDKFD_IOC_UPDATE_QUEUE; same field semantics as creation. */
struct kfd_ioctl_update_queue_args {
 __u64 ring_base_address;	/* to KFD */

 __u32 queue_id;	/* to KFD */
 __u32 ring_size;	/* to KFD */
 __u32 queue_percentage;	/* to KFD: <= KFD_MAX_QUEUE_PERCENTAGE */
 __u32 queue_priority;	/* to KFD: <= KFD_MAX_QUEUE_PRIORITY */
};
85
/* Arguments for AMDKFD_IOC_SET_CU_MASK: restrict a queue to a CU subset. */
struct kfd_ioctl_set_cu_mask_args {
 __u32 queue_id;	/* to KFD */
 __u32 num_cu_mask;	/* to KFD: size of the mask at cu_mask_ptr — TODO(review) confirm units (bits vs 32-bit words) */
 __u64 cu_mask_ptr;	/* to KFD: user pointer to the CU bitmask */
};
91
/* Arguments for AMDKFD_IOC_GET_QUEUE_WAVE_STATE (_IOWR). */
struct kfd_ioctl_get_queue_wave_state_args {
 __u64 ctl_stack_address;	/* to KFD: user buffer receiving the control stack */
 __u32 ctl_stack_used_size;	/* from KFD */
 __u32 save_area_used_size;	/* from KFD */
 __u32 queue_id;		/* to KFD */
 __u32 pad;
};
99
100
/* Cache policies for default_policy / alternate_policy below */
#define KFD_IOC_CACHE_POLICY_COHERENT 0
#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1

/* Arguments for AMDKFD_IOC_SET_MEMORY_POLICY (_IOW). */
struct kfd_ioctl_set_memory_policy_args {
 __u64 alternate_aperture_base;	/* to KFD */
 __u64 alternate_aperture_size;	/* to KFD */

 __u32 gpu_id;		/* to KFD */
 __u32 default_policy;	/* to KFD: KFD_IOC_CACHE_POLICY_* */
 __u32 alternate_policy;	/* to KFD: KFD_IOC_CACHE_POLICY_* */
 __u32 pad;
};
113
114
115
116
117
118
119
120
/* Snapshot of GPU/CPU/system clock counters for one GPU (clock correlation). */
struct kfd_ioctl_get_clock_counters_args {
 __u64 gpu_clock_counter;	/* from KFD */
 __u64 cpu_clock_counter;	/* from KFD */
 __u64 system_clock_counter;	/* from KFD */
 __u64 system_clock_freq;	/* from KFD */

 __u32 gpu_id;		/* to KFD */
 __u32 pad;
};
130
/* Virtual-address aperture ranges (base/limit pairs) for one GPU,
 * reported by KFD via the GET_PROCESS_APERTURES ioctls. */
struct kfd_process_device_apertures {
 __u64 lds_base;	/* from KFD */
 __u64 lds_limit;	/* from KFD */
 __u64 scratch_base;	/* from KFD */
 __u64 scratch_limit;	/* from KFD */
 __u64 gpuvm_base;	/* from KFD */
 __u64 gpuvm_limit;	/* from KFD */
 __u32 gpu_id;	/* from KFD */
 __u32 pad;
};
141
142
143
144
145
146
/* Fixed-size variant; limited to NUM_OF_SUPPORTED_GPUS nodes. Systems with
 * more GPUs use AMDKFD_IOC_GET_PROCESS_APERTURES_NEW below. */
#define NUM_OF_SUPPORTED_GPUS 7
struct kfd_ioctl_get_process_apertures_args {
 struct kfd_process_device_apertures
   process_apertures[NUM_OF_SUPPORTED_GPUS];	/* from KFD */

 /* from KFD: number of process_apertures entries filled in */
 __u32 num_of_nodes;
 __u32 pad;
};
156
/* Variable-size replacement for kfd_ioctl_get_process_apertures_args. */
struct kfd_ioctl_get_process_apertures_new_args {
 /* User allocated. Pointer to an array of struct
  * kfd_process_device_apertures, filled in by the kernel. */
 __u64 kfd_process_device_apertures_ptr;
 /* to KFD: capacity (in entries) of the buffer at
  *         kfd_process_device_apertures_ptr
  * from KFD: number of entries actually filled */
 __u32 num_of_nodes;
 __u32 pad;
};
169
/* Limits on the debugger ioctl buffers below — TODO(review): confirm the
 * exact semantics (watch points / address-watch / wave-control buffer
 * sizes) against the debugger interface these mirror. */
#define MAX_ALLOWED_NUM_POINTS 100
#define MAX_ALLOWED_AW_BUFF_SIZE 4096
#define MAX_ALLOWED_WAC_BUFF_SIZE 128

/* Arguments for AMDKFD_IOC_DBG_REGISTER. */
struct kfd_ioctl_dbg_register_args {
 __u32 gpu_id;	/* to KFD */
 __u32 pad;
};

/* Arguments for AMDKFD_IOC_DBG_UNREGISTER. */
struct kfd_ioctl_dbg_unregister_args {
 __u32 gpu_id;	/* to KFD */
 __u32 pad;
};

/* Arguments for AMDKFD_IOC_DBG_ADDRESS_WATCH. */
struct kfd_ioctl_dbg_address_watch_args {
 __u64 content_ptr;		/* a pointer to the actual content, to KFD */
 __u32 gpu_id;		/* to KFD */
 __u32 buf_size_in_bytes;	/* to KFD (including gpu_id and buf_size) */
};

/* Arguments for AMDKFD_IOC_DBG_WAVE_CONTROL. */
struct kfd_ioctl_dbg_wave_control_args {
 __u64 content_ptr;		/* a pointer to the actual content, to KFD */
 __u32 gpu_id;		/* to KFD */
 __u32 buf_size_in_bytes;	/* to KFD (including gpu_id and buf_size) */
};
195
196
/* Event types for kfd_ioctl_create_event_args.event_type */
#define KFD_IOC_EVENT_SIGNAL 0
#define KFD_IOC_EVENT_NODECHANGE 1
#define KFD_IOC_EVENT_DEVICESTATECHANGE 2
#define KFD_IOC_EVENT_HW_EXCEPTION 3
#define KFD_IOC_EVENT_SYSTEM_EVENT 4
#define KFD_IOC_EVENT_DEBUG_EVENT 5
#define KFD_IOC_EVENT_PROFILE_EVENT 6
#define KFD_IOC_EVENT_QUEUE_EVENT 7
#define KFD_IOC_EVENT_MEMORY 8

/* Values for kfd_ioctl_wait_events_args.wait_result */
#define KFD_IOC_WAIT_RESULT_COMPLETE 0
#define KFD_IOC_WAIT_RESULT_TIMEOUT 1
#define KFD_IOC_WAIT_RESULT_FAIL 2

/* Maximum number of signal events per process */
#define KFD_SIGNAL_EVENT_LIMIT 4096

/* For kfd_event_data.hw_exception_data.reset_type */
#define KFD_HW_EXCEPTION_WHOLE_GPU_RESET 0
#define KFD_HW_EXCEPTION_PER_ENGINE_RESET 1

/* For kfd_event_data.hw_exception_data.reset_cause */
#define KFD_HW_EXCEPTION_GPU_HANG 0
#define KFD_HW_EXCEPTION_ECC 1

/* For kfd_hsa_memory_exception_data.ErrorType */
#define KFD_MEM_ERR_NO_RAS 0
#define KFD_MEM_ERR_SRAM_ECC 1
#define KFD_MEM_ERR_POISON_CONSUMED 2
#define KFD_MEM_ERR_GPU_HANG 3

/* Arguments for AMDKFD_IOC_CREATE_EVENT (_IOWR). */
struct kfd_ioctl_create_event_args {
 __u64 event_page_offset;	/* from KFD */
 __u32 event_trigger_data;	/* from KFD - data to trigger event */
 __u32 event_type;		/* to KFD: KFD_IOC_EVENT_* */
 __u32 auto_reset;		/* to KFD */
 __u32 node_id;		/* to KFD - only valid for certain event types */

 __u32 event_id;		/* from KFD */
 __u32 event_slot_index;	/* from KFD */
};

/* Arguments for AMDKFD_IOC_DESTROY_EVENT. */
struct kfd_ioctl_destroy_event_args {
 __u32 event_id;	/* to KFD */
 __u32 pad;
};

/* Arguments for AMDKFD_IOC_SET_EVENT (manually signal an event). */
struct kfd_ioctl_set_event_args {
 __u32 event_id;	/* to KFD */
 __u32 pad;
};

/* Arguments for AMDKFD_IOC_RESET_EVENT. */
struct kfd_ioctl_reset_event_args {
 __u32 event_id;	/* to KFD */
 __u32 pad;
};
252
/* Failure flags describing a GPU memory/page fault (nonzero = condition set). */
struct kfd_memory_exception_failure {
 __u32 NotPresent;	/* Page not present or supervisor privilege */
 __u32 ReadOnly;	/* Write access to a read-only page */
 __u32 NoExecute;	/* Execute access to a page marked non-executable */
 __u32 imprecise;	/* Cannot determine the exact fault address */
};

/* memory exception data */
struct kfd_hsa_memory_exception_data {
 struct kfd_memory_exception_failure failure;
 __u64 va;
 __u32 gpu_id;
 __u32 ErrorType;	/* see KFD_MEM_ERR_* above:
			 * 0 = no RAS error,
			 * 1 = SRAM ECC error,
			 * 2 = poison consumed,
			 * 3 = GPU hang,
			 * other values reserved
			 */
};
272
273
/* hw exception data */
struct kfd_hsa_hw_exception_data {
 __u32 reset_type;	/* KFD_HW_EXCEPTION_WHOLE_GPU_RESET / _PER_ENGINE_RESET */
 __u32 reset_cause;	/* KFD_HW_EXCEPTION_GPU_HANG / _ECC */
 __u32 memory_lost;	/* nonzero if GPU memory contents were lost */
 __u32 gpu_id;
};

/* Per-event payload exchanged through AMDKFD_IOC_WAIT_EVENTS. */
struct kfd_event_data {
 union {
  struct kfd_hsa_memory_exception_data memory_exception_data;
  struct kfd_hsa_hw_exception_data hw_exception_data;
 };
 /* pointer to an extension structure for future exception types */
 __u64 kfd_event_data_ext;

 __u32 event_id;	/* to KFD */
 __u32 pad;
};
292
/* Arguments for AMDKFD_IOC_WAIT_EVENTS (_IOWR). */
struct kfd_ioctl_wait_events_args {
 __u64 events_ptr;	/* to KFD: points to a struct kfd_event_data array */

 __u32 num_events;	/* to KFD */
 __u32 wait_for_all;	/* to KFD: nonzero = wait for all events, else any */
 __u32 timeout;	/* to KFD — presumably milliseconds, TODO(review) confirm */
 __u32 wait_result;	/* from KFD: KFD_IOC_WAIT_RESULT_* */
};
301
/* Arguments for AMDKFD_IOC_SET_SCRATCH_BACKING_VA. */
struct kfd_ioctl_set_scratch_backing_va_args {
 __u64 va_addr;	/* to KFD */
 __u32 gpu_id;	/* to KFD */
 __u32 pad;
};
307
/* Arguments for AMDKFD_IOC_GET_TILE_CONFIG (_IOWR). */
struct kfd_ioctl_get_tile_config_args {
 /* to KFD: pointer to tile array */
 __u64 tile_config_ptr;
 /* to KFD: pointer to macro tile array */
 __u64 macro_tile_config_ptr;
 /* to KFD: array size allocated by user mode
  * from KFD: array size filled by kernel
  */
 __u32 num_tile_configs;
 /* to KFD: array size allocated by user mode
  * from KFD: array size filled by kernel
  */
 __u32 num_macro_tile_configs;

 __u32 gpu_id;		/* to KFD */
 __u32 gb_addr_config;	/* from KFD */
 __u32 num_banks;		/* from KFD */
 __u32 num_ranks;		/* from KFD */
 /* struct size can be extended later if needed
  * without breaking ABI compatibility
  */
};
330
/* Arguments for AMDKFD_IOC_SET_TRAP_HANDLER
 * (tba: trap base address, tma: trap memory address). */
struct kfd_ioctl_set_trap_handler_args {
 __u64 tba_addr;	/* to KFD */
 __u64 tma_addr;	/* to KFD */
 __u32 gpu_id;	/* to KFD */
 __u32 pad;
};
337
/* Arguments for AMDKFD_IOC_ACQUIRE_VM: take over the GPU VM of an open
 * DRM render-node file descriptor for this process. */
struct kfd_ioctl_acquire_vm_args {
 __u32 drm_fd;	/* to KFD */
 __u32 gpu_id;	/* to KFD */
};
342
343
/*
 * Allocation flags for AMDKFD_IOC_ALLOC_MEMORY_OF_GPU
 * (kfd_ioctl_alloc_memory_of_gpu_args.flags, a __u32).
 *
 * Shifts use 1U rather than 1: `1 << 31` shifts a signed int into its
 * sign bit, which is undefined behavior in standard C (CERT INT34-C).
 * The unsigned forms produce the same bit patterns.
 */
/* Memory type selectors (low bits) */
#define KFD_IOC_ALLOC_MEM_FLAGS_VRAM (1U << 0)
#define KFD_IOC_ALLOC_MEM_FLAGS_GTT (1U << 1)
#define KFD_IOC_ALLOC_MEM_FLAGS_USERPTR (1U << 2)
#define KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL (1U << 3)
#define KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP (1U << 4)
/* Allocation attribute modifiers (high bits) */
#define KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE (1U << 31)
#define KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE (1U << 30)
#define KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC (1U << 29)
#define KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE (1U << 28)
#define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1U << 27)
#define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT (1U << 26)
#define KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED (1U << 25)
357
358
359
360
361
362
363
364
365
366
367
368
369
/* Arguments for AMDKFD_IOC_ALLOC_MEMORY_OF_GPU (_IOWR). */
struct kfd_ioctl_alloc_memory_of_gpu_args {
 __u64 va_addr;	/* to KFD: virtual address of the allocation */
 __u64 size;		/* to KFD: size in bytes */
 __u64 handle;	/* from KFD: opaque buffer handle for later map/free */
 __u64 mmap_offset;	/* to KFD (userptr address); from KFD (mmap offset) */
 __u32 gpu_id;	/* to KFD */
 __u32 flags;		/* to KFD: KFD_IOC_ALLOC_MEM_FLAGS_* */
};

/* Arguments for AMDKFD_IOC_FREE_MEMORY_OF_GPU. */
struct kfd_ioctl_free_memory_of_gpu_args {
 __u64 handle;	/* to KFD: handle returned by the alloc ioctl */
};
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
/* Arguments for AMDKFD_IOC_MAP_MEMORY_TO_GPU (_IOWR). */
struct kfd_ioctl_map_memory_to_gpu_args {
 __u64 handle;			/* to KFD */
 __u64 device_ids_array_ptr;	/* to KFD: array of n_devices gpu_ids */
 __u32 n_devices;		/* to KFD */
 __u32 n_success;		/* to/from KFD: devices processed so far —
				 * presumably for resuming after a partial
				 * failure, TODO(review) confirm */
};

/* Arguments for AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU (_IOWR). */
struct kfd_ioctl_unmap_memory_from_gpu_args {
 __u64 handle;			/* to KFD */
 __u64 device_ids_array_ptr;	/* to KFD: array of n_devices gpu_ids */
 __u32 n_devices;		/* to KFD */
 __u32 n_success;		/* to/from KFD: see map args above */
};
419
420
421
422
423
424
425
426
/* Arguments for AMDKFD_IOC_ALLOC_QUEUE_GWS (global wave sync resources). */
struct kfd_ioctl_alloc_queue_gws_args {
 __u32 queue_id;	/* to KFD */
 __u32 num_gws;	/* to KFD: number of GWS entries requested */
 __u32 first_gws;	/* from KFD: index of the first allocated entry — TODO(review) confirm */
 __u32 pad;
};
433
/* Arguments for AMDKFD_IOC_GET_DMABUF_INFO (_IOWR). */
struct kfd_ioctl_get_dmabuf_info_args {
 __u64 size;		/* from KFD */
 __u64 metadata_ptr;	/* to KFD: user buffer receiving metadata */
 __u32 metadata_size;	/* to KFD (space allocated by user)
			 * from KFD (actual metadata size) */
 __u32 gpu_id;	/* from KFD */
 __u32 flags;		/* from KFD: KFD_IOC_ALLOC_MEM_FLAGS_* */
 __u32 dmabuf_fd;	/* to KFD */
};

/* Arguments for AMDKFD_IOC_IMPORT_DMABUF (_IOWR). */
struct kfd_ioctl_import_dmabuf_args {
 __u64 va_addr;	/* to KFD */
 __u64 handle;	/* from KFD */
 __u32 gpu_id;	/* to KFD */
 __u32 dmabuf_fd;	/* to KFD */
};
451
452
453
454
/* System management interface (SMI) events, delivered through the
 * anonymous fd returned by AMDKFD_IOC_SMI_EVENTS. */
enum kfd_smi_event {
 KFD_SMI_EVENT_NONE = 0,	/* not a real event; events count from 1 */
 KFD_SMI_EVENT_VMFAULT = 1,
 KFD_SMI_EVENT_THERMAL_THROTTLE = 2,
 KFD_SMI_EVENT_GPU_PRE_RESET = 3,
 KFD_SMI_EVENT_GPU_POST_RESET = 4,
};

/* Events are numbered from 1, so event i maps to mask bit (i - 1). */
#define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1))

/* Arguments for AMDKFD_IOC_SMI_EVENTS (_IOWR). */
struct kfd_ioctl_smi_events_args {
 __u32 gpuid;	/* to KFD */
 __u32 anon_fd;	/* from KFD */
};
469
470
471
/* Byte offsets of registers within the remapped MMIO page — presumably the
 * page obtained with KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP; TODO(review) confirm. */
enum kfd_mmio_remap {
 KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL = 0,
 KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL = 4,
};
476
477
/* Guarantee host access to memory */
#define KFD_IOCTL_SVM_FLAG_HOST_ACCESS 0x00000001
/* Fine grained coherency between all devices with access */
#define KFD_IOCTL_SVM_FLAG_COHERENT 0x00000002
/* Use any GPU in same hive as preferred device */
#define KFD_IOCTL_SVM_FLAG_HIVE_LOCAL 0x00000004
/* GPUs only read, allows replication */
#define KFD_IOCTL_SVM_FLAG_GPU_RO 0x00000008
/* Allow execution on GPU */
#define KFD_IOCTL_SVM_FLAG_GPU_EXEC 0x00000010
/* GPUs mostly read, may allow similar optimizations as RO, but writes fault */
#define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY 0x00000020

/**
 * kfd_ioctl_svm_op - SVM ioctl operations
 *
 * @KFD_IOCTL_SVM_OP_SET_ATTR: Modify one or more attributes
 * @KFD_IOCTL_SVM_OP_GET_ATTR: Query one or more attributes
 */
enum kfd_ioctl_svm_op {
 KFD_IOCTL_SVM_OP_SET_ATTR,
 KFD_IOCTL_SVM_OP_GET_ATTR
};
500
501
502
503
504
505
506
/**
 * kfd_ioctl_svm_location - Enum for preferred and prefetch locations
 *
 * SYSMEM (0) selects system memory; UNDEFINED means no preference / not set.
 * NOTE(review): other values are presumably GPU IDs — confirm with the driver.
 */
enum kfd_ioctl_svm_location {
 KFD_IOCTL_SVM_LOCATION_SYSMEM = 0,
 KFD_IOCTL_SVM_LOCATION_UNDEFINED = 0xffffffff
};
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
/**
 * kfd_ioctl_svm_attr_type - SVM attribute types
 *
 * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: preferred location (see
 *                                    enum kfd_ioctl_svm_location)
 * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: prefetch/migration target location
 * @KFD_IOCTL_SVM_ATTR_ACCESS: device can access the range
 * @KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE: device can access without migration
 * @KFD_IOCTL_SVM_ATTR_NO_ACCESS: device has no access to the range
 * @KFD_IOCTL_SVM_ATTR_SET_FLAGS: bitmask of KFD_IOCTL_SVM_FLAG_* to set
 * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS: bitmask of KFD_IOCTL_SVM_FLAG_* to clear
 * @KFD_IOCTL_SVM_ATTR_GRANULARITY: migration granularity — presumably
 *                                  log2 of the page count; TODO(review) confirm
 */
enum kfd_ioctl_svm_attr_type {
 KFD_IOCTL_SVM_ATTR_PREFERRED_LOC,
 KFD_IOCTL_SVM_ATTR_PREFETCH_LOC,
 KFD_IOCTL_SVM_ATTR_ACCESS,
 KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE,
 KFD_IOCTL_SVM_ATTR_NO_ACCESS,
 KFD_IOCTL_SVM_ATTR_SET_FLAGS,
 KFD_IOCTL_SVM_ATTR_CLR_FLAGS,
 KFD_IOCTL_SVM_ATTR_GRANULARITY
};

/* One attribute entry for AMDKFD_IOC_SVM: @type is a
 * kfd_ioctl_svm_attr_type; the meaning of @value depends on @type
 * (location, flag bitmask, or granularity). */
struct kfd_ioctl_svm_attribute {
 __u32 type;
 __u32 value;
};
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591struct kfd_ioctl_svm_args {
592 __u64 start_addr;
593 __u64 size;
594 __u32 op;
595 __u32 nattr;
596
597 struct kfd_ioctl_svm_attribute attrs[0];
598};
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
/* Arguments for AMDKFD_IOC_SET_XNACK_MODE (_IOWR). Signed on purpose:
 * presumably positive enables XNACK, 0 disables, and a negative value
 * queries the current mode (returned in the same field) — TODO(review)
 * confirm the query convention against the driver. */
struct kfd_ioctl_set_xnack_mode_args {
 __s32 xnack_enabled;
};
637
/*
 * ioctl command numbers. 'K' is the amdkfd ioctl magic; _IO/_IOR/_IOW/_IOWR
 * (from <linux/ioctl.h>) encode the transfer direction and argument size.
 */
#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOW(nr, type) _IOW(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOWR(nr, type) _IOWR(AMDKFD_IOCTL_BASE, nr, type)

#define AMDKFD_IOC_GET_VERSION \
 AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)

#define AMDKFD_IOC_CREATE_QUEUE \
 AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)

#define AMDKFD_IOC_DESTROY_QUEUE \
 AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)

#define AMDKFD_IOC_SET_MEMORY_POLICY \
 AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)

#define AMDKFD_IOC_GET_CLOCK_COUNTERS \
 AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)

#define AMDKFD_IOC_GET_PROCESS_APERTURES \
 AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)

#define AMDKFD_IOC_UPDATE_QUEUE \
 AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)

#define AMDKFD_IOC_CREATE_EVENT \
 AMDKFD_IOWR(0x08, struct kfd_ioctl_create_event_args)

#define AMDKFD_IOC_DESTROY_EVENT \
 AMDKFD_IOW(0x09, struct kfd_ioctl_destroy_event_args)

#define AMDKFD_IOC_SET_EVENT \
 AMDKFD_IOW(0x0A, struct kfd_ioctl_set_event_args)

#define AMDKFD_IOC_RESET_EVENT \
 AMDKFD_IOW(0x0B, struct kfd_ioctl_reset_event_args)

#define AMDKFD_IOC_WAIT_EVENTS \
 AMDKFD_IOWR(0x0C, struct kfd_ioctl_wait_events_args)

#define AMDKFD_IOC_DBG_REGISTER \
 AMDKFD_IOW(0x0D, struct kfd_ioctl_dbg_register_args)

#define AMDKFD_IOC_DBG_UNREGISTER \
 AMDKFD_IOW(0x0E, struct kfd_ioctl_dbg_unregister_args)

#define AMDKFD_IOC_DBG_ADDRESS_WATCH \
 AMDKFD_IOW(0x0F, struct kfd_ioctl_dbg_address_watch_args)

#define AMDKFD_IOC_DBG_WAVE_CONTROL \
 AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)

#define AMDKFD_IOC_SET_SCRATCH_BACKING_VA \
 AMDKFD_IOWR(0x11, struct kfd_ioctl_set_scratch_backing_va_args)

#define AMDKFD_IOC_GET_TILE_CONFIG \
 AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args)

#define AMDKFD_IOC_SET_TRAP_HANDLER \
 AMDKFD_IOW(0x13, struct kfd_ioctl_set_trap_handler_args)

#define AMDKFD_IOC_GET_PROCESS_APERTURES_NEW \
 AMDKFD_IOWR(0x14, \
   struct kfd_ioctl_get_process_apertures_new_args)

#define AMDKFD_IOC_ACQUIRE_VM \
 AMDKFD_IOW(0x15, struct kfd_ioctl_acquire_vm_args)

#define AMDKFD_IOC_ALLOC_MEMORY_OF_GPU \
 AMDKFD_IOWR(0x16, struct kfd_ioctl_alloc_memory_of_gpu_args)

#define AMDKFD_IOC_FREE_MEMORY_OF_GPU \
 AMDKFD_IOW(0x17, struct kfd_ioctl_free_memory_of_gpu_args)

#define AMDKFD_IOC_MAP_MEMORY_TO_GPU \
 AMDKFD_IOWR(0x18, struct kfd_ioctl_map_memory_to_gpu_args)

#define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU \
 AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)

#define AMDKFD_IOC_SET_CU_MASK \
 AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)

#define AMDKFD_IOC_GET_QUEUE_WAVE_STATE \
 AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)

#define AMDKFD_IOC_GET_DMABUF_INFO \
 AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args)

#define AMDKFD_IOC_IMPORT_DMABUF \
 AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)

#define AMDKFD_IOC_ALLOC_QUEUE_GWS \
 AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args)

#define AMDKFD_IOC_SMI_EVENTS \
 AMDKFD_IOWR(0x1F, struct kfd_ioctl_smi_events_args)

#define AMDKFD_IOC_SVM AMDKFD_IOWR(0x20, struct kfd_ioctl_svm_args)

#define AMDKFD_IOC_SET_XNACK_MODE \
 AMDKFD_IOWR(0x21, struct kfd_ioctl_set_xnack_mode_args)

/* Valid command-number range: END is one past the last defined nr (0x21). */
#define AMDKFD_COMMAND_START 0x01
#define AMDKFD_COMMAND_END 0x22
745
746#endif
747