/*
 * This file is auto-generated. Modifications will be lost.
 *
 * See https://android.googlesource.com/platform/bionic/+/master/libc/kernel/
 * for more information.
 */
#ifndef KFD_IOCTL_H_INCLUDED
#define KFD_IOCTL_H_INCLUDED
#include <drm/drm.h>
#include <linux/ioctl.h>
#define KFD_IOCTL_MAJOR_VERSION 1
#define KFD_IOCTL_MINOR_VERSION 17
struct kfd_ioctl_get_version_args {
  __u32 major_version;
  __u32 minor_version;
};
#define KFD_IOC_QUEUE_TYPE_COMPUTE 0x0
#define KFD_IOC_QUEUE_TYPE_SDMA 0x1
#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL 0x2
#define KFD_IOC_QUEUE_TYPE_SDMA_XGMI 0x3
#define KFD_IOC_QUEUE_TYPE_SDMA_BY_ENG_ID 0x4
#define KFD_MAX_QUEUE_PERCENTAGE 100
#define KFD_MAX_QUEUE_PRIORITY 15
struct kfd_ioctl_create_queue_args {
  __u64 ring_base_address;
  __u64 write_pointer_address;
  __u64 read_pointer_address;
  __u64 doorbell_offset;
  __u32 ring_size;
  __u32 gpu_id;
  __u32 queue_type;
  __u32 queue_percentage;
  __u32 queue_priority;
  __u32 queue_id;
  __u64 eop_buffer_address;
  __u64 eop_buffer_size;
  __u64 ctx_save_restore_address;
  __u32 ctx_save_restore_size;
  __u32 ctl_stack_size;
  __u32 sdma_engine_id;
  __u32 pad;
};
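/*
 * Illustrative sketch only (not part of the generated header): how the
 * create-queue arguments might be filled for an AQL compute queue. The
 * buffers, sizes, priority and the kfd file descriptor below are assumptions
 * of the example; queue_id and doorbell_offset are written back by the
 * driver on success.
 *
 *   struct kfd_ioctl_create_queue_args args = {0};
 *   args.gpu_id = gpu_id;                      // from the topology nodes
 *   args.queue_type = KFD_IOC_QUEUE_TYPE_COMPUTE_AQL;
 *   args.queue_percentage = KFD_MAX_QUEUE_PERCENTAGE;
 *   args.queue_priority = 7;                   // 0..KFD_MAX_QUEUE_PRIORITY
 *   args.ring_base_address = (__u64) (uintptr_t) ring_buf;
 *   args.ring_size = ring_size_bytes;
 *   args.read_pointer_address = (__u64) (uintptr_t) &rptr;
 *   args.write_pointer_address = (__u64) (uintptr_t) &wptr;
 *   if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_QUEUE, &args) == 0)
 *     use(args.queue_id, args.doorbell_offset);
 */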
struct kfd_ioctl_destroy_queue_args {
  __u32 queue_id;
  __u32 pad;
};
struct kfd_ioctl_update_queue_args {
  __u64 ring_base_address;
  __u32 queue_id;
  __u32 ring_size;
  __u32 queue_percentage;
  __u32 queue_priority;
};
struct kfd_ioctl_set_cu_mask_args {
  __u32 queue_id;
  __u32 num_cu_mask;
  __u64 cu_mask_ptr;
};
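/*
 * Illustrative sketch only: cu_mask_ptr is expected to point at an array of
 * __u32 words holding one bit per compute unit, with num_cu_mask giving the
 * number of bits supplied. The mask values and the kfd file descriptor are
 * assumptions of the example.
 *
 *   __u32 cu_mask[2] = { 0xffffffff, 0x0000ffff };  // enable the first 48 CUs
 *   struct kfd_ioctl_set_cu_mask_args args = {
 *     .queue_id = queue_id,
 *     .num_cu_mask = 64,
 *     .cu_mask_ptr = (__u64) (uintptr_t) cu_mask,
 *   };
 *   ioctl(kfd_fd, AMDKFD_IOC_SET_CU_MASK, &args);
 */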
struct kfd_ioctl_get_queue_wave_state_args {
  __u64 ctl_stack_address;
  __u32 ctl_stack_used_size;
  __u32 save_area_used_size;
  __u32 queue_id;
  __u32 pad;
};
struct kfd_ioctl_get_available_memory_args {
  __u64 available;
  __u32 gpu_id;
  __u32 pad;
};
struct kfd_dbg_device_info_entry {
  __u64 exception_status;
  __u64 lds_base;
  __u64 lds_limit;
  __u64 scratch_base;
  __u64 scratch_limit;
  __u64 gpuvm_base;
  __u64 gpuvm_limit;
  __u32 gpu_id;
  __u32 location_id;
  __u32 vendor_id;
  __u32 device_id;
  __u32 revision_id;
  __u32 subsystem_vendor_id;
  __u32 subsystem_device_id;
  __u32 fw_version;
  __u32 gfx_target_version;
  __u32 simd_count;
  __u32 max_waves_per_simd;
  __u32 array_count;
  __u32 simd_arrays_per_engine;
  __u32 num_xcc;
  __u32 capability;
  __u32 debug_prop;
};
#define KFD_IOC_CACHE_POLICY_COHERENT 0
#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1
struct kfd_ioctl_set_memory_policy_args {
  __u64 alternate_aperture_base;
  __u64 alternate_aperture_size;
  __u32 gpu_id;
  __u32 default_policy;
  __u32 alternate_policy;
  __u32 pad;
};
struct kfd_ioctl_get_clock_counters_args {
  __u64 gpu_clock_counter;
  __u64 cpu_clock_counter;
  __u64 system_clock_counter;
  __u64 system_clock_freq;
  __u32 gpu_id;
  __u32 pad;
};
struct kfd_process_device_apertures {
  __u64 lds_base;
  __u64 lds_limit;
  __u64 scratch_base;
  __u64 scratch_limit;
  __u64 gpuvm_base;
  __u64 gpuvm_limit;
  __u32 gpu_id;
  __u32 pad;
};
#define NUM_OF_SUPPORTED_GPUS 7
struct kfd_ioctl_get_process_apertures_args {
  struct kfd_process_device_apertures process_apertures[NUM_OF_SUPPORTED_GPUS];
  __u32 num_of_nodes;
  __u32 pad;
};
struct kfd_ioctl_get_process_apertures_new_args {
  __u64 kfd_process_device_apertures_ptr;
  __u32 num_of_nodes;
  __u32 pad;
};
#define MAX_ALLOWED_NUM_POINTS 100
#define MAX_ALLOWED_AW_BUFF_SIZE 4096
#define MAX_ALLOWED_WAC_BUFF_SIZE 128
struct kfd_ioctl_dbg_register_args {
  __u32 gpu_id;
  __u32 pad;
};
struct kfd_ioctl_dbg_unregister_args {
  __u32 gpu_id;
  __u32 pad;
};
struct kfd_ioctl_dbg_address_watch_args {
  __u64 content_ptr;
  __u32 gpu_id;
  __u32 buf_size_in_bytes;
};
struct kfd_ioctl_dbg_wave_control_args {
  __u64 content_ptr;
  __u32 gpu_id;
  __u32 buf_size_in_bytes;
};
#define KFD_INVALID_FD 0xffffffff
#define KFD_IOC_EVENT_SIGNAL 0
#define KFD_IOC_EVENT_NODECHANGE 1
#define KFD_IOC_EVENT_DEVICESTATECHANGE 2
#define KFD_IOC_EVENT_HW_EXCEPTION 3
#define KFD_IOC_EVENT_SYSTEM_EVENT 4
#define KFD_IOC_EVENT_DEBUG_EVENT 5
#define KFD_IOC_EVENT_PROFILE_EVENT 6
#define KFD_IOC_EVENT_QUEUE_EVENT 7
#define KFD_IOC_EVENT_MEMORY 8
#define KFD_IOC_WAIT_RESULT_COMPLETE 0
#define KFD_IOC_WAIT_RESULT_TIMEOUT 1
#define KFD_IOC_WAIT_RESULT_FAIL 2
#define KFD_SIGNAL_EVENT_LIMIT 4096
#define KFD_HW_EXCEPTION_WHOLE_GPU_RESET 0
#define KFD_HW_EXCEPTION_PER_ENGINE_RESET 1
#define KFD_HW_EXCEPTION_GPU_HANG 0
#define KFD_HW_EXCEPTION_ECC 1
#define KFD_MEM_ERR_NO_RAS 0
#define KFD_MEM_ERR_SRAM_ECC 1
#define KFD_MEM_ERR_POISON_CONSUMED 2
#define KFD_MEM_ERR_GPU_HANG 3
struct kfd_ioctl_create_event_args {
  __u64 event_page_offset;
  __u32 event_trigger_data;
  __u32 event_type;
  __u32 auto_reset;
  __u32 node_id;
  __u32 event_id;
  __u32 event_slot_index;
};
struct kfd_ioctl_destroy_event_args {
  __u32 event_id;
  __u32 pad;
};
struct kfd_ioctl_set_event_args {
  __u32 event_id;
  __u32 pad;
};
struct kfd_ioctl_reset_event_args {
  __u32 event_id;
  __u32 pad;
};
struct kfd_memory_exception_failure {
  __u32 NotPresent;
  __u32 ReadOnly;
  __u32 NoExecute;
  __u32 imprecise;
};
struct kfd_hsa_memory_exception_data {
  struct kfd_memory_exception_failure failure;
  __u64 va;
  __u32 gpu_id;
  __u32 ErrorType;
};
struct kfd_hsa_hw_exception_data {
  __u32 reset_type;
  __u32 reset_cause;
  __u32 memory_lost;
  __u32 gpu_id;
};
struct kfd_hsa_signal_event_data {
  __u64 last_event_age;
};
struct kfd_event_data {
  union {
    struct kfd_hsa_memory_exception_data memory_exception_data;
    struct kfd_hsa_hw_exception_data hw_exception_data;
    struct kfd_hsa_signal_event_data signal_event_data;
  };
  __u64 kfd_event_data_ext;
  __u32 event_id;
  __u32 pad;
};
struct kfd_ioctl_wait_events_args {
  __u64 events_ptr;
  __u32 num_events;
  __u32 wait_for_all;
  __u32 timeout;
  __u32 wait_result;
};
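/*
 * Illustrative sketch only: creating an auto-reset signal event and waiting
 * on it. The timeout value (assumed to be milliseconds) and the kfd file
 * descriptor are assumptions of the example; event_id is returned by the
 * driver and then referenced from the kfd_event_data array passed to the
 * wait ioctl.
 *
 *   struct kfd_ioctl_create_event_args cev = {
 *     .event_type = KFD_IOC_EVENT_SIGNAL,
 *     .auto_reset = 1,
 *   };
 *   ioctl(kfd_fd, AMDKFD_IOC_CREATE_EVENT, &cev);
 *
 *   struct kfd_event_data ev = { .event_id = cev.event_id };
 *   struct kfd_ioctl_wait_events_args wev = {
 *     .events_ptr = (__u64) (uintptr_t) &ev,
 *     .num_events = 1,
 *     .wait_for_all = 1,
 *     .timeout = 1000,
 *   };
 *   ioctl(kfd_fd, AMDKFD_IOC_WAIT_EVENTS, &wev);
 *   // wev.wait_result is one of the KFD_IOC_WAIT_RESULT_* values
 */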
struct kfd_ioctl_set_scratch_backing_va_args {
  __u64 va_addr;
  __u32 gpu_id;
  __u32 pad;
};
struct kfd_ioctl_get_tile_config_args {
  __u64 tile_config_ptr;
  __u64 macro_tile_config_ptr;
  __u32 num_tile_configs;
  __u32 num_macro_tile_configs;
  __u32 gpu_id;
  __u32 gb_addr_config;
  __u32 num_banks;
  __u32 num_ranks;
};
struct kfd_ioctl_set_trap_handler_args {
  __u64 tba_addr;
  __u64 tma_addr;
  __u32 gpu_id;
  __u32 pad;
};
struct kfd_ioctl_acquire_vm_args {
  __u32 drm_fd;
  __u32 gpu_id;
};
#define KFD_IOC_ALLOC_MEM_FLAGS_VRAM (1 << 0)
#define KFD_IOC_ALLOC_MEM_FLAGS_GTT (1 << 1)
#define KFD_IOC_ALLOC_MEM_FLAGS_USERPTR (1 << 2)
#define KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL (1 << 3)
#define KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP (1 << 4)
#define KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE (1 << 31)
#define KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE (1 << 30)
#define KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC (1 << 29)
#define KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE (1 << 28)
#define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1 << 27)
#define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT (1 << 26)
#define KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED (1 << 25)
#define KFD_IOC_ALLOC_MEM_FLAGS_EXT_COHERENT (1 << 24)
#define KFD_IOC_ALLOC_MEM_FLAGS_CONTIGUOUS (1 << 23)
struct kfd_ioctl_alloc_memory_of_gpu_args {
  __u64 va_addr;
  __u64 size;
  __u64 handle;
  __u64 mmap_offset;
  __u32 gpu_id;
  __u32 flags;
};
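/*
 * Illustrative sketch only: allocating VRAM on one GPU and mapping it there.
 * The virtual address, size and file descriptors are assumptions of the
 * example; handle and mmap_offset are filled in by the driver, and the
 * handle is then passed to the map/unmap/free ioctls below.
 *
 *   struct kfd_ioctl_alloc_memory_of_gpu_args alloc = {
 *     .va_addr = va,
 *     .size = size,
 *     .gpu_id = gpu_id,
 *     .flags = KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE,
 *   };
 *   ioctl(kfd_fd, AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, &alloc);
 *
 *   __u32 gpus[1] = { gpu_id };
 *   struct kfd_ioctl_map_memory_to_gpu_args map = {
 *     .handle = alloc.handle,
 *     .device_ids_array_ptr = (__u64) (uintptr_t) gpus,
 *     .n_devices = 1,
 *   };
 *   ioctl(kfd_fd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &map);
 *   // on partial failure, map.n_success reports how many devices were mapped
 */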
struct kfd_ioctl_free_memory_of_gpu_args {
  __u64 handle;
};
struct kfd_ioctl_map_memory_to_gpu_args {
  __u64 handle;
  __u64 device_ids_array_ptr;
  __u32 n_devices;
  __u32 n_success;
};
struct kfd_ioctl_unmap_memory_from_gpu_args {
  __u64 handle;
  __u64 device_ids_array_ptr;
  __u32 n_devices;
  __u32 n_success;
};
struct kfd_ioctl_alloc_queue_gws_args {
  __u32 queue_id;
  __u32 num_gws;
  __u32 first_gws;
  __u32 pad;
};
struct kfd_ioctl_get_dmabuf_info_args {
  __u64 size;
  __u64 metadata_ptr;
  __u32 metadata_size;
  __u32 gpu_id;
  __u32 flags;
  __u32 dmabuf_fd;
};
struct kfd_ioctl_import_dmabuf_args {
  __u64 va_addr;
  __u64 handle;
  __u32 gpu_id;
  __u32 dmabuf_fd;
};
struct kfd_ioctl_export_dmabuf_args {
  __u64 handle;
  __u32 flags;
  __u32 dmabuf_fd;
};
enum kfd_smi_event {
  KFD_SMI_EVENT_NONE = 0,
  KFD_SMI_EVENT_VMFAULT = 1,
  KFD_SMI_EVENT_THERMAL_THROTTLE = 2,
  KFD_SMI_EVENT_GPU_PRE_RESET = 3,
  KFD_SMI_EVENT_GPU_POST_RESET = 4,
  KFD_SMI_EVENT_MIGRATE_START = 5,
  KFD_SMI_EVENT_MIGRATE_END = 6,
  KFD_SMI_EVENT_PAGE_FAULT_START = 7,
  KFD_SMI_EVENT_PAGE_FAULT_END = 8,
  KFD_SMI_EVENT_QUEUE_EVICTION = 9,
  KFD_SMI_EVENT_QUEUE_RESTORE = 10,
  KFD_SMI_EVENT_UNMAP_FROM_GPU = 11,
  KFD_SMI_EVENT_ALL_PROCESS = 64
};
enum KFD_MIGRATE_TRIGGERS {
  KFD_MIGRATE_TRIGGER_PREFETCH,
  KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
  KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
  KFD_MIGRATE_TRIGGER_TTM_EVICTION
};
enum KFD_QUEUE_EVICTION_TRIGGERS {
  KFD_QUEUE_EVICTION_TRIGGER_SVM,
  KFD_QUEUE_EVICTION_TRIGGER_USERPTR,
  KFD_QUEUE_EVICTION_TRIGGER_TTM,
  KFD_QUEUE_EVICTION_TRIGGER_SUSPEND,
  KFD_QUEUE_EVICTION_CRIU_CHECKPOINT,
  KFD_QUEUE_EVICTION_CRIU_RESTORE
};
enum KFD_SVM_UNMAP_TRIGGERS {
  KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY,
  KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE,
  KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU
};
#define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1))
#define KFD_SMI_EVENT_MSG_SIZE 96
struct kfd_ioctl_smi_events_args {
  __u32 gpuid;
  __u32 anon_fd;
};
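/*
 * Illustrative sketch only: the SMI events ioctl returns an anonymous file
 * descriptor in anon_fd. The usual pattern, assumed here, is to write a
 * 64-bit mask built with KFD_SMI_EVENT_MASK_FROM_INDEX() to that descriptor
 * and then read events back as text lines laid out per the KFD_EVENT_FMT_*
 * macros below. Error handling is omitted.
 *
 *   struct kfd_ioctl_smi_events_args args = { .gpuid = gpu_id };
 *   ioctl(kfd_fd, AMDKFD_IOC_SMI_EVENTS, &args);
 *
 *   __u64 events = KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_VMFAULT);
 *   write(args.anon_fd, &events, sizeof(events));
 *
 *   char msg[KFD_SMI_EVENT_MSG_SIZE];
 *   read(args.anon_fd, msg, sizeof(msg));
 */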
#define KFD_EVENT_FMT_UPDATE_GPU_RESET(reset_seq_num,reset_cause) "%x %s\n", (reset_seq_num), (reset_cause)
#define KFD_EVENT_FMT_THERMAL_THROTTLING(bitmask,counter) "%llx:%llx\n", (bitmask), (counter)
#define KFD_EVENT_FMT_VMFAULT(pid,task_name) "%x:%s\n", (pid), (task_name)
#define KFD_EVENT_FMT_PAGEFAULT_START(ns,pid,addr,node,rw) "%lld -%d @%lx(%x) %c\n", (ns), (pid), (addr), (node), (rw)
#define KFD_EVENT_FMT_PAGEFAULT_END(ns,pid,addr,node,migrate_update) "%lld -%d @%lx(%x) %c\n", (ns), (pid), (addr), (node), (migrate_update)
#define KFD_EVENT_FMT_MIGRATE_START(ns,pid,start,size,from,to,prefetch_loc,preferred_loc,migrate_trigger) "%lld -%d @%lx(%lx) %x->%x %x:%x %d\n", (ns), (pid), (start), (size), (from), (to), (prefetch_loc), (preferred_loc), (migrate_trigger)
#define KFD_EVENT_FMT_MIGRATE_END(ns,pid,start,size,from,to,migrate_trigger) "%lld -%d @%lx(%lx) %x->%x %d\n", (ns), (pid), (start), (size), (from), (to), (migrate_trigger)
#define KFD_EVENT_FMT_QUEUE_EVICTION(ns,pid,node,evict_trigger) "%lld -%d %x %d\n", (ns), (pid), (node), (evict_trigger)
#define KFD_EVENT_FMT_QUEUE_RESTORE(ns,pid,node,rescheduled) "%lld -%d %x %c\n", (ns), (pid), (node), (rescheduled)
#define KFD_EVENT_FMT_UNMAP_FROM_GPU(ns,pid,addr,size,node,unmap_trigger) "%lld -%d @%lx(%lx) %x %d\n", (ns), (pid), (addr), (size), (node), (unmap_trigger)
enum kfd_criu_op {
  KFD_CRIU_OP_PROCESS_INFO,
  KFD_CRIU_OP_CHECKPOINT,
  KFD_CRIU_OP_UNPAUSE,
  KFD_CRIU_OP_RESTORE,
  KFD_CRIU_OP_RESUME,
};
struct kfd_ioctl_criu_args {
  __u64 devices;
  __u64 bos;
  __u64 priv_data;
  __u64 priv_data_size;
  __u32 num_devices;
  __u32 num_bos;
  __u32 num_objects;
  __u32 pid;
  __u32 op;
};
struct kfd_criu_device_bucket {
  __u32 user_gpu_id;
  __u32 actual_gpu_id;
  __u32 drm_fd;
  __u32 pad;
};
struct kfd_criu_bo_bucket {
  __u64 addr;
  __u64 size;
  __u64 offset;
  __u64 restored_offset;
  __u32 gpu_id;
  __u32 alloc_flags;
  __u32 dmabuf_fd;
  __u32 pad;
};
enum kfd_mmio_remap {
  KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL = 0,
  KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL = 4,
};
#define KFD_IOCTL_SVM_FLAG_HOST_ACCESS 0x00000001
#define KFD_IOCTL_SVM_FLAG_COHERENT 0x00000002
#define KFD_IOCTL_SVM_FLAG_HIVE_LOCAL 0x00000004
#define KFD_IOCTL_SVM_FLAG_GPU_RO 0x00000008
#define KFD_IOCTL_SVM_FLAG_GPU_EXEC 0x00000010
#define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY 0x00000020
#define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED 0x00000040
#define KFD_IOCTL_SVM_FLAG_EXT_COHERENT 0x00000080
enum kfd_ioctl_svm_op {
  KFD_IOCTL_SVM_OP_SET_ATTR,
  KFD_IOCTL_SVM_OP_GET_ATTR
};
enum kfd_ioctl_svm_location {
  KFD_IOCTL_SVM_LOCATION_SYSMEM = 0,
  KFD_IOCTL_SVM_LOCATION_UNDEFINED = 0xffffffff
};
enum kfd_ioctl_svm_attr_type {
  KFD_IOCTL_SVM_ATTR_PREFERRED_LOC,
  KFD_IOCTL_SVM_ATTR_PREFETCH_LOC,
  KFD_IOCTL_SVM_ATTR_ACCESS,
  KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE,
  KFD_IOCTL_SVM_ATTR_NO_ACCESS,
  KFD_IOCTL_SVM_ATTR_SET_FLAGS,
  KFD_IOCTL_SVM_ATTR_CLR_FLAGS,
  KFD_IOCTL_SVM_ATTR_GRANULARITY
};
struct kfd_ioctl_svm_attribute {
  __u32 type;
  __u32 value;
};
struct kfd_ioctl_svm_args {
  __u64 start_addr;
  __u64 size;
  __u32 op;
  __u32 nattr;
  struct kfd_ioctl_svm_attribute attrs[];
};
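/*
 * Illustrative sketch only: setting SVM attributes on a range. Because the
 * argument struct ends in a flexible attrs[] array, the caller allocates
 * space for the header plus nattr attribute pairs; the range, attribute
 * choices and values below are assumptions of the example.
 *
 *   size_t sz = sizeof(struct kfd_ioctl_svm_args) +
 *               2 * sizeof(struct kfd_ioctl_svm_attribute);
 *   struct kfd_ioctl_svm_args *args = calloc(1, sz);
 *   args->start_addr = range_start;
 *   args->size = range_size;
 *   args->op = KFD_IOCTL_SVM_OP_SET_ATTR;
 *   args->nattr = 2;
 *   args->attrs[0].type = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC;
 *   args->attrs[0].value = gpu_id;
 *   args->attrs[1].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
 *   args->attrs[1].value = KFD_IOCTL_SVM_FLAG_COHERENT;
 *   ioctl(kfd_fd, AMDKFD_IOC_SVM, args);
 *   free(args);
 */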
struct kfd_ioctl_set_xnack_mode_args {
  __s32 xnack_enabled;
};
enum kfd_dbg_trap_override_mode {
  KFD_DBG_TRAP_OVERRIDE_OR = 0,
  KFD_DBG_TRAP_OVERRIDE_REPLACE = 1
};
enum kfd_dbg_trap_mask {
  KFD_DBG_TRAP_MASK_FP_INVALID = 1,
  KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL = 2,
  KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO = 4,
  KFD_DBG_TRAP_MASK_FP_OVERFLOW = 8,
  KFD_DBG_TRAP_MASK_FP_UNDERFLOW = 16,
  KFD_DBG_TRAP_MASK_FP_INEXACT = 32,
  KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO = 64,
  KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH = 128,
  KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION = 256,
  KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START = (1 << 30),
  KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END = (1 << 31)
};
enum kfd_dbg_trap_wave_launch_mode {
  KFD_DBG_TRAP_WAVE_LAUNCH_MODE_NORMAL = 0,
  KFD_DBG_TRAP_WAVE_LAUNCH_MODE_HALT = 1,
  KFD_DBG_TRAP_WAVE_LAUNCH_MODE_DEBUG = 3
};
enum kfd_dbg_trap_address_watch_mode {
  KFD_DBG_TRAP_ADDRESS_WATCH_MODE_READ = 0,
  KFD_DBG_TRAP_ADDRESS_WATCH_MODE_NONREAD = 1,
  KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ATOMIC = 2,
  KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ALL = 3
};
enum kfd_dbg_trap_flags {
  KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP = 1,
  KFD_DBG_TRAP_FLAG_SINGLE_ALU_OP = 2,
};
enum kfd_dbg_trap_exception_code {
  EC_NONE = 0,
  EC_QUEUE_WAVE_ABORT = 1,
  EC_QUEUE_WAVE_TRAP = 2,
  EC_QUEUE_WAVE_MATH_ERROR = 3,
  EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION = 4,
  EC_QUEUE_WAVE_MEMORY_VIOLATION = 5,
  EC_QUEUE_WAVE_APERTURE_VIOLATION = 6,
  EC_QUEUE_PACKET_DISPATCH_DIM_INVALID = 16,
  EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID = 17,
  EC_QUEUE_PACKET_DISPATCH_CODE_INVALID = 18,
  EC_QUEUE_PACKET_RESERVED = 19,
  EC_QUEUE_PACKET_UNSUPPORTED = 20,
  EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID = 21,
  EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID = 22,
  EC_QUEUE_PACKET_VENDOR_UNSUPPORTED = 23,
  EC_QUEUE_PREEMPTION_ERROR = 30,
  EC_QUEUE_NEW = 31,
  EC_DEVICE_QUEUE_DELETE = 32,
  EC_DEVICE_MEMORY_VIOLATION = 33,
  EC_DEVICE_RAS_ERROR = 34,
  EC_DEVICE_FATAL_HALT = 35,
  EC_DEVICE_NEW = 36,
  EC_PROCESS_RUNTIME = 48,
  EC_PROCESS_DEVICE_REMOVE = 49,
  EC_MAX
};
#define KFD_EC_MASK(ecode) (1ULL << (ecode - 1))
#define KFD_EC_MASK_QUEUE (KFD_EC_MASK(EC_QUEUE_WAVE_ABORT) | KFD_EC_MASK(EC_QUEUE_WAVE_TRAP) | KFD_EC_MASK(EC_QUEUE_WAVE_MATH_ERROR) | KFD_EC_MASK(EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION) | KFD_EC_MASK(EC_QUEUE_WAVE_MEMORY_VIOLATION) | KFD_EC_MASK(EC_QUEUE_WAVE_APERTURE_VIOLATION) | KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) | KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) | KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) | KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) | KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) | KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) | KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) | KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED) | KFD_EC_MASK(EC_QUEUE_PREEMPTION_ERROR) | KFD_EC_MASK(EC_QUEUE_NEW))
#define KFD_EC_MASK_DEVICE (KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE) | KFD_EC_MASK(EC_DEVICE_RAS_ERROR) | KFD_EC_MASK(EC_DEVICE_FATAL_HALT) | KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION) | KFD_EC_MASK(EC_DEVICE_NEW))
#define KFD_EC_MASK_PROCESS (KFD_EC_MASK(EC_PROCESS_RUNTIME) | KFD_EC_MASK(EC_PROCESS_DEVICE_REMOVE))
#define KFD_EC_MASK_PACKET (KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) | KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) | KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) | KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) | KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) | KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) | KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) | KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED))
#define KFD_DBG_EC_IS_VALID(ecode) (ecode > EC_NONE && ecode < EC_MAX)
#define KFD_DBG_EC_TYPE_IS_QUEUE(ecode) (KFD_DBG_EC_IS_VALID(ecode) && ! ! (KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE))
#define KFD_DBG_EC_TYPE_IS_DEVICE(ecode) (KFD_DBG_EC_IS_VALID(ecode) && ! ! (KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE))
#define KFD_DBG_EC_TYPE_IS_PROCESS(ecode) (KFD_DBG_EC_IS_VALID(ecode) && ! ! (KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS))
#define KFD_DBG_EC_TYPE_IS_PACKET(ecode) (KFD_DBG_EC_IS_VALID(ecode) && ! ! (KFD_EC_MASK(ecode) & KFD_EC_MASK_PACKET))
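/*
 * Illustrative sketch only: exception codes are turned into single-bit masks
 * with KFD_EC_MASK() and classified with the KFD_DBG_EC_TYPE_IS_* helpers.
 * The exception_mask variable below is an assumption of the example.
 *
 *   if (exception_mask & KFD_EC_MASK(EC_QUEUE_WAVE_TRAP)) {
 *     // a wave raised a trap exception
 *   }
 *   if (KFD_DBG_EC_TYPE_IS_DEVICE(EC_DEVICE_RAS_ERROR)) {
 *     // evaluates to true: EC_DEVICE_RAS_ERROR is a device-scope code
 *   }
 */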
enum kfd_dbg_runtime_state {
  DEBUG_RUNTIME_STATE_DISABLED = 0,
  DEBUG_RUNTIME_STATE_ENABLED = 1,
  DEBUG_RUNTIME_STATE_ENABLED_BUSY = 2,
  DEBUG_RUNTIME_STATE_ENABLED_ERROR = 3
};
struct kfd_runtime_info {
  __u64 r_debug;
  __u32 runtime_state;
  __u32 ttmp_setup;
};
#define KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK 1
#define KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK 2
struct kfd_ioctl_runtime_enable_args {
  __u64 r_debug;
  __u32 mode_mask;
  __u32 capabilities_mask;
};
struct kfd_queue_snapshot_entry {
  __u64 exception_status;
  __u64 ring_base_address;
  __u64 write_pointer_address;
  __u64 read_pointer_address;
  __u64 ctx_save_restore_address;
  __u32 queue_id;
  __u32 gpu_id;
  __u32 ring_size;
  __u32 queue_type;
  __u32 ctx_save_restore_area_size;
  __u32 reserved;
};
#define KFD_DBG_QUEUE_ERROR_BIT 30
#define KFD_DBG_QUEUE_INVALID_BIT 31
#define KFD_DBG_QUEUE_ERROR_MASK (1 << KFD_DBG_QUEUE_ERROR_BIT)
#define KFD_DBG_QUEUE_INVALID_MASK (1 << KFD_DBG_QUEUE_INVALID_BIT)
struct kfd_context_save_area_header {
  struct {
    __u32 control_stack_offset;
    __u32 control_stack_size;
    __u32 wave_state_offset;
    __u32 wave_state_size;
  } wave_state;
  __u32 debug_offset;
  __u32 debug_size;
  __u64 err_payload_addr;
  __u32 err_event_id;
  __u32 reserved1;
};
enum kfd_dbg_trap_operations {
  KFD_IOC_DBG_TRAP_ENABLE = 0,
  KFD_IOC_DBG_TRAP_DISABLE = 1,
  KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT = 2,
  KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED = 3,
  KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE = 4,
  KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE = 5,
  KFD_IOC_DBG_TRAP_SUSPEND_QUEUES = 6,
  KFD_IOC_DBG_TRAP_RESUME_QUEUES = 7,
  KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH = 8,
  KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH = 9,
  KFD_IOC_DBG_TRAP_SET_FLAGS = 10,
  KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT = 11,
  KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO = 12,
  KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT = 13,
  KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT = 14
};
struct kfd_ioctl_dbg_trap_enable_args {
  __u64 exception_mask;
  __u64 rinfo_ptr;
  __u32 rinfo_size;
  __u32 dbg_fd;
};
struct kfd_ioctl_dbg_trap_send_runtime_event_args {
  __u64 exception_mask;
  __u32 gpu_id;
  __u32 queue_id;
};
struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args {
  __u64 exception_mask;
};
struct kfd_ioctl_dbg_trap_set_wave_launch_override_args {
  __u32 override_mode;
  __u32 enable_mask;
  __u32 support_request_mask;
  __u32 pad;
};
struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args {
  __u32 launch_mode;
  __u32 pad;
};
struct kfd_ioctl_dbg_trap_suspend_queues_args {
  __u64 exception_mask;
  __u64 queue_array_ptr;
  __u32 num_queues;
  __u32 grace_period;
};
struct kfd_ioctl_dbg_trap_resume_queues_args {
  __u64 queue_array_ptr;
  __u32 num_queues;
  __u32 pad;
};
struct kfd_ioctl_dbg_trap_set_node_address_watch_args {
  __u64 address;
  __u32 mode;
  __u32 mask;
  __u32 gpu_id;
  __u32 id;
};
struct kfd_ioctl_dbg_trap_clear_node_address_watch_args {
  __u32 gpu_id;
  __u32 id;
};
struct kfd_ioctl_dbg_trap_set_flags_args {
  __u32 flags;
  __u32 pad;
};
struct kfd_ioctl_dbg_trap_query_debug_event_args {
  __u64 exception_mask;
  __u32 gpu_id;
  __u32 queue_id;
};
struct kfd_ioctl_dbg_trap_query_exception_info_args {
  __u64 info_ptr;
  __u32 info_size;
  __u32 source_id;
  __u32 exception_code;
  __u32 clear_exception;
};
struct kfd_ioctl_dbg_trap_queue_snapshot_args {
  __u64 exception_mask;
  __u64 snapshot_buf_ptr;
  __u32 num_queues;
  __u32 entry_size;
};
struct kfd_ioctl_dbg_trap_device_snapshot_args {
  __u64 exception_mask;
  __u64 snapshot_buf_ptr;
  __u32 num_devices;
  __u32 entry_size;
};
struct kfd_ioctl_dbg_trap_args {
  __u32 pid;
  __u32 op;
  union {
    struct kfd_ioctl_dbg_trap_enable_args enable;
    struct kfd_ioctl_dbg_trap_send_runtime_event_args send_runtime_event;
    struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args set_exceptions_enabled;
    struct kfd_ioctl_dbg_trap_set_wave_launch_override_args launch_override;
    struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args launch_mode;
    struct kfd_ioctl_dbg_trap_suspend_queues_args suspend_queues;
    struct kfd_ioctl_dbg_trap_resume_queues_args resume_queues;
    struct kfd_ioctl_dbg_trap_set_node_address_watch_args set_node_address_watch;
    struct kfd_ioctl_dbg_trap_clear_node_address_watch_args clear_node_address_watch;
    struct kfd_ioctl_dbg_trap_set_flags_args set_flags;
    struct kfd_ioctl_dbg_trap_query_debug_event_args query_debug_event;
    struct kfd_ioctl_dbg_trap_query_exception_info_args query_exception_info;
    struct kfd_ioctl_dbg_trap_queue_snapshot_args queue_snapshot;
    struct kfd_ioctl_dbg_trap_device_snapshot_args device_snapshot;
  };
};
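/*
 * Illustrative sketch only: op selects one of the kfd_dbg_trap_operations
 * values and determines which union member the driver interprets. The target
 * pid and the kfd file descriptor are assumptions of the example.
 *
 *   struct kfd_ioctl_dbg_trap_args args = {0};
 *   args.pid = target_pid;
 *   args.op = KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE;
 *   args.launch_mode.launch_mode = KFD_DBG_TRAP_WAVE_LAUNCH_MODE_HALT;
 *   ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args);
 */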
#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr,type) _IOR(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOW(nr,type) _IOW(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOWR(nr,type) _IOWR(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOC_GET_VERSION AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)
#define AMDKFD_IOC_CREATE_QUEUE AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)
#define AMDKFD_IOC_DESTROY_QUEUE AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)
#define AMDKFD_IOC_SET_MEMORY_POLICY AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)
#define AMDKFD_IOC_GET_CLOCK_COUNTERS AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)
#define AMDKFD_IOC_GET_PROCESS_APERTURES AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)
#define AMDKFD_IOC_UPDATE_QUEUE AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)
#define AMDKFD_IOC_CREATE_EVENT AMDKFD_IOWR(0x08, struct kfd_ioctl_create_event_args)
#define AMDKFD_IOC_DESTROY_EVENT AMDKFD_IOW(0x09, struct kfd_ioctl_destroy_event_args)
#define AMDKFD_IOC_SET_EVENT AMDKFD_IOW(0x0A, struct kfd_ioctl_set_event_args)
#define AMDKFD_IOC_RESET_EVENT AMDKFD_IOW(0x0B, struct kfd_ioctl_reset_event_args)
#define AMDKFD_IOC_WAIT_EVENTS AMDKFD_IOWR(0x0C, struct kfd_ioctl_wait_events_args)
#define AMDKFD_IOC_DBG_REGISTER_DEPRECATED AMDKFD_IOW(0x0D, struct kfd_ioctl_dbg_register_args)
#define AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED AMDKFD_IOW(0x0E, struct kfd_ioctl_dbg_unregister_args)
#define AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED AMDKFD_IOW(0x0F, struct kfd_ioctl_dbg_address_watch_args)
#define AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)
#define AMDKFD_IOC_SET_SCRATCH_BACKING_VA AMDKFD_IOWR(0x11, struct kfd_ioctl_set_scratch_backing_va_args)
#define AMDKFD_IOC_GET_TILE_CONFIG AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args)
#define AMDKFD_IOC_SET_TRAP_HANDLER AMDKFD_IOW(0x13, struct kfd_ioctl_set_trap_handler_args)
#define AMDKFD_IOC_GET_PROCESS_APERTURES_NEW AMDKFD_IOWR(0x14, struct kfd_ioctl_get_process_apertures_new_args)
#define AMDKFD_IOC_ACQUIRE_VM AMDKFD_IOW(0x15, struct kfd_ioctl_acquire_vm_args)
#define AMDKFD_IOC_ALLOC_MEMORY_OF_GPU AMDKFD_IOWR(0x16, struct kfd_ioctl_alloc_memory_of_gpu_args)
#define AMDKFD_IOC_FREE_MEMORY_OF_GPU AMDKFD_IOW(0x17, struct kfd_ioctl_free_memory_of_gpu_args)
#define AMDKFD_IOC_MAP_MEMORY_TO_GPU AMDKFD_IOWR(0x18, struct kfd_ioctl_map_memory_to_gpu_args)
#define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)
#define AMDKFD_IOC_SET_CU_MASK AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)
#define AMDKFD_IOC_GET_QUEUE_WAVE_STATE AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)
#define AMDKFD_IOC_GET_DMABUF_INFO AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args)
#define AMDKFD_IOC_IMPORT_DMABUF AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)
#define AMDKFD_IOC_ALLOC_QUEUE_GWS AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args)
#define AMDKFD_IOC_SMI_EVENTS AMDKFD_IOWR(0x1F, struct kfd_ioctl_smi_events_args)
#define AMDKFD_IOC_SVM AMDKFD_IOWR(0x20, struct kfd_ioctl_svm_args)
#define AMDKFD_IOC_SET_XNACK_MODE AMDKFD_IOWR(0x21, struct kfd_ioctl_set_xnack_mode_args)
#define AMDKFD_IOC_CRIU_OP AMDKFD_IOWR(0x22, struct kfd_ioctl_criu_args)
#define AMDKFD_IOC_AVAILABLE_MEMORY AMDKFD_IOWR(0x23, struct kfd_ioctl_get_available_memory_args)
#define AMDKFD_IOC_EXPORT_DMABUF AMDKFD_IOWR(0x24, struct kfd_ioctl_export_dmabuf_args)
#define AMDKFD_IOC_RUNTIME_ENABLE AMDKFD_IOWR(0x25, struct kfd_ioctl_runtime_enable_args)
#define AMDKFD_IOC_DBG_TRAP AMDKFD_IOWR(0x26, struct kfd_ioctl_dbg_trap_args)
#define AMDKFD_COMMAND_START 0x01
#define AMDKFD_COMMAND_END 0x27
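/*
 * Illustrative sketch only: all of the commands above are issued against a
 * file descriptor for the /dev/kfd character device; the device path and the
 * lack of error handling are assumptions of the example.
 *
 *   int kfd_fd = open("/dev/kfd", O_RDWR | O_CLOEXEC);
 *   struct kfd_ioctl_get_version_args ver = {0};
 *   if (kfd_fd >= 0 && ioctl(kfd_fd, AMDKFD_IOC_GET_VERSION, &ver) == 0)
 *     printf("KFD interface %u.%u\n", ver.major_version, ver.minor_version);
 */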
#endif