/****************************************************************************
 ****************************************************************************
 ***
 ***   This header was automatically generated from a Linux kernel header
 ***   of the same name, to make information necessary for userspace to
 ***   call into the kernel available to libc.  It contains only constants,
 ***   structures, and macros generated from the original header, and thus,
 ***   contains no copyrightable information.
 ***
 ***   To edit the content of this header, modify the corresponding
 ***   source file (e.g. under external/kernel-headers/original/) then
 ***   run bionic/libc/kernel/tools/update_all.py
 ***
 ***   Any manual change here will be lost the next time this script
 ***   is run. You've been warned!
 ***
 ****************************************************************************
 ****************************************************************************/
#ifndef KFD_IOCTL_H_INCLUDED
#define KFD_IOCTL_H_INCLUDED
#include <drm/drm.h>
#include <linux/ioctl.h>
#define KFD_IOCTL_MAJOR_VERSION 1
#define KFD_IOCTL_MINOR_VERSION 11
struct kfd_ioctl_get_version_args {
  __u32 major_version;
  __u32 minor_version;
};
#define KFD_IOC_QUEUE_TYPE_COMPUTE 0x0
#define KFD_IOC_QUEUE_TYPE_SDMA 0x1
#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL 0x2
#define KFD_IOC_QUEUE_TYPE_SDMA_XGMI 0x3
#define KFD_MAX_QUEUE_PERCENTAGE 100
#define KFD_MAX_QUEUE_PRIORITY 15
struct kfd_ioctl_create_queue_args {
  __u64 ring_base_address;
  __u64 write_pointer_address;
  __u64 read_pointer_address;
  __u64 doorbell_offset;
  __u32 ring_size;
  __u32 gpu_id;
  __u32 queue_type;
  __u32 queue_percentage;
  __u32 queue_priority;
  __u32 queue_id;
  __u64 eop_buffer_address;
  __u64 eop_buffer_size;
  __u64 ctx_save_restore_address;
  __u32 ctx_save_restore_size;
  __u32 ctl_stack_size;
};
struct kfd_ioctl_destroy_queue_args {
  __u32 queue_id;
  __u32 pad;
};
struct kfd_ioctl_update_queue_args {
  __u64 ring_base_address;
  __u32 queue_id;
  __u32 ring_size;
  __u32 queue_percentage;
  __u32 queue_priority;
};
struct kfd_ioctl_set_cu_mask_args {
  __u32 queue_id;
  __u32 num_cu_mask;
  __u64 cu_mask_ptr;
};
struct kfd_ioctl_get_queue_wave_state_args {
  __u64 ctl_stack_address;
  __u32 ctl_stack_used_size;
  __u32 save_area_used_size;
  __u32 queue_id;
  __u32 pad;
};
struct kfd_ioctl_get_available_memory_args {
  __u64 available;
  __u32 gpu_id;
  __u32 pad;
};
#define KFD_IOC_CACHE_POLICY_COHERENT 0
#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1
struct kfd_ioctl_set_memory_policy_args {
  __u64 alternate_aperture_base;
  __u64 alternate_aperture_size;
  __u32 gpu_id;
  __u32 default_policy;
  __u32 alternate_policy;
  __u32 pad;
};
struct kfd_ioctl_get_clock_counters_args {
  __u64 gpu_clock_counter;
  __u64 cpu_clock_counter;
  __u64 system_clock_counter;
  __u64 system_clock_freq;
  __u32 gpu_id;
  __u32 pad;
};
struct kfd_process_device_apertures {
  __u64 lds_base;
  __u64 lds_limit;
  __u64 scratch_base;
  __u64 scratch_limit;
  __u64 gpuvm_base;
  __u64 gpuvm_limit;
  __u32 gpu_id;
  __u32 pad;
};
#define NUM_OF_SUPPORTED_GPUS 7
struct kfd_ioctl_get_process_apertures_args {
  struct kfd_process_device_apertures process_apertures[NUM_OF_SUPPORTED_GPUS];
  __u32 num_of_nodes;
  __u32 pad;
};
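/* Illustrative usage sketch (not part of the generated header): querying the
 * KFD interface version with AMDKFD_IOC_GET_VERSION, which is defined near
 * the end of this file. The device node path /dev/kfd is an assumption of
 * this sketch.
 *
 *   #include <fcntl.h>
 *   #include <stdio.h>
 *   #include <unistd.h>
 *   #include <sys/ioctl.h>
 *   #include <linux/kfd_ioctl.h>
 *
 *   int print_kfd_version(void) {
 *     int kfd = open("/dev/kfd", O_RDWR | O_CLOEXEC);
 *     if (kfd < 0)
 *       return -1;
 *     struct kfd_ioctl_get_version_args args = { 0 };
 *     if (ioctl(kfd, AMDKFD_IOC_GET_VERSION, &args) == 0)
 *       printf("KFD ioctl interface %u.%u\n", args.major_version, args.minor_version);
 *     close(kfd);
 *     return 0;
 *   }
 */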
struct kfd_ioctl_get_process_apertures_new_args {
  __u64 kfd_process_device_apertures_ptr;
  __u32 num_of_nodes;
  __u32 pad;
};
#define MAX_ALLOWED_NUM_POINTS 100
#define MAX_ALLOWED_AW_BUFF_SIZE 4096
#define MAX_ALLOWED_WAC_BUFF_SIZE 128
struct kfd_ioctl_dbg_register_args {
  __u32 gpu_id;
  __u32 pad;
};
struct kfd_ioctl_dbg_unregister_args {
  __u32 gpu_id;
  __u32 pad;
};
struct kfd_ioctl_dbg_address_watch_args {
  __u64 content_ptr;
  __u32 gpu_id;
  __u32 buf_size_in_bytes;
};
struct kfd_ioctl_dbg_wave_control_args {
  __u64 content_ptr;
  __u32 gpu_id;
  __u32 buf_size_in_bytes;
};
#define KFD_INVALID_FD 0xffffffff
#define KFD_IOC_EVENT_SIGNAL 0
#define KFD_IOC_EVENT_NODECHANGE 1
#define KFD_IOC_EVENT_DEVICESTATECHANGE 2
#define KFD_IOC_EVENT_HW_EXCEPTION 3
#define KFD_IOC_EVENT_SYSTEM_EVENT 4
#define KFD_IOC_EVENT_DEBUG_EVENT 5
#define KFD_IOC_EVENT_PROFILE_EVENT 6
#define KFD_IOC_EVENT_QUEUE_EVENT 7
#define KFD_IOC_EVENT_MEMORY 8
#define KFD_IOC_WAIT_RESULT_COMPLETE 0
#define KFD_IOC_WAIT_RESULT_TIMEOUT 1
#define KFD_IOC_WAIT_RESULT_FAIL 2
#define KFD_SIGNAL_EVENT_LIMIT 4096
#define KFD_HW_EXCEPTION_WHOLE_GPU_RESET 0
#define KFD_HW_EXCEPTION_PER_ENGINE_RESET 1
#define KFD_HW_EXCEPTION_GPU_HANG 0
#define KFD_HW_EXCEPTION_ECC 1
#define KFD_MEM_ERR_NO_RAS 0
#define KFD_MEM_ERR_SRAM_ECC 1
#define KFD_MEM_ERR_POISON_CONSUMED 2
#define KFD_MEM_ERR_GPU_HANG 3
struct kfd_ioctl_create_event_args {
  __u64 event_page_offset;
  __u32 event_trigger_data;
  __u32 event_type;
  __u32 auto_reset;
  __u32 node_id;
  __u32 event_id;
  __u32 event_slot_index;
};
struct kfd_ioctl_destroy_event_args {
  __u32 event_id;
  __u32 pad;
};
struct kfd_ioctl_set_event_args {
  __u32 event_id;
  __u32 pad;
};
struct kfd_ioctl_reset_event_args {
  __u32 event_id;
  __u32 pad;
};
struct kfd_memory_exception_failure {
  __u32 NotPresent;
  __u32 ReadOnly;
  __u32 NoExecute;
  __u32 imprecise;
};
struct kfd_hsa_memory_exception_data {
  struct kfd_memory_exception_failure failure;
  __u64 va;
  __u32 gpu_id;
  __u32 ErrorType;
};
struct kfd_hsa_hw_exception_data {
  __u32 reset_type;
  __u32 reset_cause;
  __u32 memory_lost;
  __u32 gpu_id;
};
struct kfd_event_data {
  union {
    struct kfd_hsa_memory_exception_data memory_exception_data;
    struct kfd_hsa_hw_exception_data hw_exception_data;
  };
  __u64 kfd_event_data_ext;
  __u32 event_id;
  __u32 pad;
};
struct kfd_ioctl_wait_events_args {
  __u64 events_ptr;
  __u32 num_events;
  __u32 wait_for_all;
  __u32 timeout;
  __u32 wait_result;
};
struct kfd_ioctl_set_scratch_backing_va_args {
  __u64 va_addr;
  __u32 gpu_id;
  __u32 pad;
};
struct kfd_ioctl_get_tile_config_args {
  __u64 tile_config_ptr;
  __u64 macro_tile_config_ptr;
  __u32 num_tile_configs;
  __u32 num_macro_tile_configs;
  __u32 gpu_id;
  __u32 gb_addr_config;
  __u32 num_banks;
  __u32 num_ranks;
};
struct kfd_ioctl_set_trap_handler_args {
  __u64 tba_addr;
  __u64 tma_addr;
  __u32 gpu_id;
  __u32 pad;
};
struct kfd_ioctl_acquire_vm_args {
  __u32 drm_fd;
  __u32 gpu_id;
};
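/* Illustrative usage sketch (not part of the generated header): creating a
 * signal event and blocking on it with AMDKFD_IOC_CREATE_EVENT and
 * AMDKFD_IOC_WAIT_EVENTS (both defined near the end of this file). The file
 * descriptor `kfd` is assumed to be an open handle to the KFD device node.
 *
 *   struct kfd_ioctl_create_event_args create = { 0 };
 *   create.event_type = KFD_IOC_EVENT_SIGNAL;
 *   create.auto_reset = 1;
 *   if (ioctl(kfd, AMDKFD_IOC_CREATE_EVENT, &create) != 0)
 *     return -1;
 *
 *   struct kfd_event_data event_data = { 0 };
 *   event_data.event_id = create.event_id;
 *
 *   struct kfd_ioctl_wait_events_args wait = { 0 };
 *   wait.events_ptr = (__u64)(uintptr_t)&event_data;
 *   wait.num_events = 1;
 *   wait.wait_for_all = 1;
 *   wait.timeout = 1000;
 *   if (ioctl(kfd, AMDKFD_IOC_WAIT_EVENTS, &wait) != 0)
 *     return -1;
 *   if (wait.wait_result == KFD_IOC_WAIT_RESULT_TIMEOUT)
 *     return 0;  // the event was not signalled within the timeout
 */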
#define KFD_IOC_ALLOC_MEM_FLAGS_VRAM (1 << 0)
#define KFD_IOC_ALLOC_MEM_FLAGS_GTT (1 << 1)
#define KFD_IOC_ALLOC_MEM_FLAGS_USERPTR (1 << 2)
#define KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL (1 << 3)
#define KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP (1 << 4)
#define KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE (1 << 31)
#define KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE (1 << 30)
#define KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC (1 << 29)
#define KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE (1 << 28)
#define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1 << 27)
#define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT (1 << 26)
#define KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED (1 << 25)
struct kfd_ioctl_alloc_memory_of_gpu_args {
  __u64 va_addr;
  __u64 size;
  __u64 handle;
  __u64 mmap_offset;
  __u32 gpu_id;
  __u32 flags;
};
struct kfd_ioctl_free_memory_of_gpu_args {
  __u64 handle;
};
struct kfd_ioctl_map_memory_to_gpu_args {
  __u64 handle;
  __u64 device_ids_array_ptr;
  __u32 n_devices;
  __u32 n_success;
};
struct kfd_ioctl_unmap_memory_from_gpu_args {
  __u64 handle;
  __u64 device_ids_array_ptr;
  __u32 n_devices;
  __u32 n_success;
};
struct kfd_ioctl_alloc_queue_gws_args {
  __u32 queue_id;
  __u32 num_gws;
  __u32 first_gws;
  __u32 pad;
};
struct kfd_ioctl_get_dmabuf_info_args {
  __u64 size;
  __u64 metadata_ptr;
  __u32 metadata_size;
  __u32 gpu_id;
  __u32 flags;
  __u32 dmabuf_fd;
};
struct kfd_ioctl_import_dmabuf_args {
  __u64 va_addr;
  __u64 handle;
  __u32 gpu_id;
  __u32 dmabuf_fd;
};
enum kfd_smi_event {
  KFD_SMI_EVENT_NONE = 0,
  KFD_SMI_EVENT_VMFAULT = 1,
  KFD_SMI_EVENT_THERMAL_THROTTLE = 2,
  KFD_SMI_EVENT_GPU_PRE_RESET = 3,
  KFD_SMI_EVENT_GPU_POST_RESET = 4,
  KFD_SMI_EVENT_MIGRATE_START = 5,
  KFD_SMI_EVENT_MIGRATE_END = 6,
  KFD_SMI_EVENT_PAGE_FAULT_START = 7,
  KFD_SMI_EVENT_PAGE_FAULT_END = 8,
  KFD_SMI_EVENT_QUEUE_EVICTION = 9,
  KFD_SMI_EVENT_QUEUE_RESTORE = 10,
  KFD_SMI_EVENT_UNMAP_FROM_GPU = 11,
  KFD_SMI_EVENT_ALL_PROCESS = 64
};
enum KFD_MIGRATE_TRIGGERS {
  KFD_MIGRATE_TRIGGER_PREFETCH,
  KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
  KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
  KFD_MIGRATE_TRIGGER_TTM_EVICTION
};
enum KFD_QUEUE_EVICTION_TRIGGERS {
  KFD_QUEUE_EVICTION_TRIGGER_SVM,
  KFD_QUEUE_EVICTION_TRIGGER_USERPTR,
  KFD_QUEUE_EVICTION_TRIGGER_TTM,
  KFD_QUEUE_EVICTION_TRIGGER_SUSPEND,
  KFD_QUEUE_EVICTION_CRIU_CHECKPOINT,
  KFD_QUEUE_EVICTION_CRIU_RESTORE
};
enum KFD_SVM_UNMAP_TRIGGERS {
  KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY,
  KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE,
  KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU
};
#define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1))
#define KFD_SMI_EVENT_MSG_SIZE 96
struct kfd_ioctl_smi_events_args {
  __u32 gpuid;
  __u32 anon_fd;
};
enum kfd_criu_op {
  KFD_CRIU_OP_PROCESS_INFO,
  KFD_CRIU_OP_CHECKPOINT,
  KFD_CRIU_OP_UNPAUSE,
  KFD_CRIU_OP_RESTORE,
  KFD_CRIU_OP_RESUME,
};
struct kfd_ioctl_criu_args {
  __u64 devices;
  __u64 bos;
  __u64 priv_data;
  __u64 priv_data_size;
  __u32 num_devices;
  __u32 num_bos;
  __u32 num_objects;
  __u32 pid;
  __u32 op;
};
struct kfd_criu_device_bucket {
  __u32 user_gpu_id;
  __u32 actual_gpu_id;
  __u32 drm_fd;
  __u32 pad;
};
struct kfd_criu_bo_bucket {
  __u64 addr;
  __u64 size;
  __u64 offset;
  __u64 restored_offset;
  __u32 gpu_id;
  __u32 alloc_flags;
  __u32 dmabuf_fd;
  __u32 pad;
};
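/* Illustrative usage sketch (not part of the generated header): allocating
 * VRAM on one GPU and mapping it, using AMDKFD_IOC_ALLOC_MEMORY_OF_GPU and
 * AMDKFD_IOC_MAP_MEMORY_TO_GPU (defined near the end of this file). The
 * values of `kfd`, `gpu_id` and `va` are assumptions for the sketch; real
 * callers obtain gpu_id from the topology sysfs nodes and must have acquired
 * the VM for that GPU (AMDKFD_IOC_ACQUIRE_VM) beforehand.
 *
 *   struct kfd_ioctl_alloc_memory_of_gpu_args alloc = { 0 };
 *   alloc.va_addr = va;               // page-aligned GPU virtual address
 *   alloc.size = 2 * 1024 * 1024;     // size in bytes
 *   alloc.gpu_id = gpu_id;
 *   alloc.flags = KFD_IOC_ALLOC_MEM_FLAGS_VRAM | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE;
 *   if (ioctl(kfd, AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, &alloc) != 0)
 *     return -1;
 *
 *   __u32 device_ids[1] = { gpu_id };
 *   struct kfd_ioctl_map_memory_to_gpu_args map = { 0 };
 *   map.handle = alloc.handle;
 *   map.device_ids_array_ptr = (__u64)(uintptr_t)device_ids;
 *   map.n_devices = 1;
 *   if (ioctl(kfd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &map) != 0)
 *     return -1;
 */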
enum kfd_mmio_remap {
  KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL = 0,
  KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL = 4,
};
#define KFD_IOCTL_SVM_FLAG_HOST_ACCESS 0x00000001
#define KFD_IOCTL_SVM_FLAG_COHERENT 0x00000002
#define KFD_IOCTL_SVM_FLAG_HIVE_LOCAL 0x00000004
#define KFD_IOCTL_SVM_FLAG_GPU_RO 0x00000008
#define KFD_IOCTL_SVM_FLAG_GPU_EXEC 0x00000010
#define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY 0x00000020
#define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED 0x00000040
enum kfd_ioctl_svm_op {
  KFD_IOCTL_SVM_OP_SET_ATTR,
  KFD_IOCTL_SVM_OP_GET_ATTR
};
enum kfd_ioctl_svm_location {
  KFD_IOCTL_SVM_LOCATION_SYSMEM = 0,
  KFD_IOCTL_SVM_LOCATION_UNDEFINED = 0xffffffff
};
enum kfd_ioctl_svm_attr_type {
  KFD_IOCTL_SVM_ATTR_PREFERRED_LOC,
  KFD_IOCTL_SVM_ATTR_PREFETCH_LOC,
  KFD_IOCTL_SVM_ATTR_ACCESS,
  KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE,
  KFD_IOCTL_SVM_ATTR_NO_ACCESS,
  KFD_IOCTL_SVM_ATTR_SET_FLAGS,
  KFD_IOCTL_SVM_ATTR_CLR_FLAGS,
  KFD_IOCTL_SVM_ATTR_GRANULARITY
};
struct kfd_ioctl_svm_attribute {
  __u32 type;
  __u32 value;
};
struct kfd_ioctl_svm_args {
  __u64 start_addr;
  __u64 size;
  __u32 op;
  __u32 nattr;
  struct kfd_ioctl_svm_attribute attrs[];
};
struct kfd_ioctl_set_xnack_mode_args {
  __s32 xnack_enabled;
};
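/* Illustrative usage sketch (not part of the generated header): setting SVM
 * range attributes with AMDKFD_IOC_SVM (defined below). Because
 * kfd_ioctl_svm_args ends in a flexible array member, the argument buffer
 * must be allocated large enough to hold `nattr` attributes. `kfd`, `start`,
 * `len` and `gpu_id` are assumptions for the sketch.
 *
 *   size_t nattr = 2;
 *   struct kfd_ioctl_svm_args *args =
 *       calloc(1, sizeof(*args) + nattr * sizeof(args->attrs[0]));
 *   if (!args)
 *     return -1;
 *   args->start_addr = start;   // page-aligned start of the SVM range
 *   args->size = len;           // range length in bytes
 *   args->op = KFD_IOCTL_SVM_OP_SET_ATTR;
 *   args->nattr = nattr;
 *   args->attrs[0].type = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC;
 *   args->attrs[0].value = gpu_id;
 *   args->attrs[1].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
 *   args->attrs[1].value = KFD_IOCTL_SVM_FLAG_HOST_ACCESS;
 *   int ret = ioctl(kfd, AMDKFD_IOC_SVM, args);
 *   free(args);
 *   return ret;
 */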
#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOW(nr, type) _IOW(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOWR(nr, type) _IOWR(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOC_GET_VERSION AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)
#define AMDKFD_IOC_CREATE_QUEUE AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)
#define AMDKFD_IOC_DESTROY_QUEUE AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)
#define AMDKFD_IOC_SET_MEMORY_POLICY AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)
#define AMDKFD_IOC_GET_CLOCK_COUNTERS AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)
#define AMDKFD_IOC_GET_PROCESS_APERTURES AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)
#define AMDKFD_IOC_UPDATE_QUEUE AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)
#define AMDKFD_IOC_CREATE_EVENT AMDKFD_IOWR(0x08, struct kfd_ioctl_create_event_args)
#define AMDKFD_IOC_DESTROY_EVENT AMDKFD_IOW(0x09, struct kfd_ioctl_destroy_event_args)
#define AMDKFD_IOC_SET_EVENT AMDKFD_IOW(0x0A, struct kfd_ioctl_set_event_args)
#define AMDKFD_IOC_RESET_EVENT AMDKFD_IOW(0x0B, struct kfd_ioctl_reset_event_args)
#define AMDKFD_IOC_WAIT_EVENTS AMDKFD_IOWR(0x0C, struct kfd_ioctl_wait_events_args)
#define AMDKFD_IOC_DBG_REGISTER_DEPRECATED AMDKFD_IOW(0x0D, struct kfd_ioctl_dbg_register_args)
#define AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED AMDKFD_IOW(0x0E, struct kfd_ioctl_dbg_unregister_args)
#define AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED AMDKFD_IOW(0x0F, struct kfd_ioctl_dbg_address_watch_args)
#define AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)
#define AMDKFD_IOC_SET_SCRATCH_BACKING_VA AMDKFD_IOWR(0x11, struct kfd_ioctl_set_scratch_backing_va_args)
#define AMDKFD_IOC_GET_TILE_CONFIG AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args)
#define AMDKFD_IOC_SET_TRAP_HANDLER AMDKFD_IOW(0x13, struct kfd_ioctl_set_trap_handler_args)
#define AMDKFD_IOC_GET_PROCESS_APERTURES_NEW AMDKFD_IOWR(0x14, struct kfd_ioctl_get_process_apertures_new_args)
#define AMDKFD_IOC_ACQUIRE_VM AMDKFD_IOW(0x15, struct kfd_ioctl_acquire_vm_args)
#define AMDKFD_IOC_ALLOC_MEMORY_OF_GPU AMDKFD_IOWR(0x16, struct kfd_ioctl_alloc_memory_of_gpu_args)
#define AMDKFD_IOC_FREE_MEMORY_OF_GPU AMDKFD_IOW(0x17, struct kfd_ioctl_free_memory_of_gpu_args)
#define AMDKFD_IOC_MAP_MEMORY_TO_GPU AMDKFD_IOWR(0x18, struct kfd_ioctl_map_memory_to_gpu_args)
#define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)
#define AMDKFD_IOC_SET_CU_MASK AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)
#define AMDKFD_IOC_GET_QUEUE_WAVE_STATE AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)
#define AMDKFD_IOC_GET_DMABUF_INFO AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args)
#define AMDKFD_IOC_IMPORT_DMABUF AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)
#define AMDKFD_IOC_ALLOC_QUEUE_GWS AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args)
#define AMDKFD_IOC_SMI_EVENTS AMDKFD_IOWR(0x1F, struct kfd_ioctl_smi_events_args)
#define AMDKFD_IOC_SVM AMDKFD_IOWR(0x20, struct kfd_ioctl_svm_args)
#define AMDKFD_IOC_SET_XNACK_MODE AMDKFD_IOWR(0x21, struct kfd_ioctl_set_xnack_mode_args)
#define AMDKFD_IOC_CRIU_OP AMDKFD_IOWR(0x22, struct kfd_ioctl_criu_args)
#define AMDKFD_IOC_AVAILABLE_MEMORY AMDKFD_IOWR(0x23, struct kfd_ioctl_get_available_memory_args)
#define AMDKFD_COMMAND_START 0x01
#define AMDKFD_COMMAND_END 0x24
#endif
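/* Illustrative usage sketch (not part of the generated header): querying how
 * much memory is still available for allocation on one GPU with
 * AMDKFD_IOC_AVAILABLE_MEMORY. `kfd` and `gpu_id` are assumptions for the
 * sketch; older kernels may not implement this ioctl, so a failure here
 * should be treated as "unknown" rather than as zero.
 *
 *   struct kfd_ioctl_get_available_memory_args mem = { 0 };
 *   mem.gpu_id = gpu_id;
 *   if (ioctl(kfd, AMDKFD_IOC_AVAILABLE_MEMORY, &mem) == 0)
 *     printf("gpu 0x%x: %llu bytes available\n", gpu_id,
 *            (unsigned long long)mem.available);
 */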