/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_IOCTL_H_INCLUDED
#define KFD_IOCTL_H_INCLUDED

#include <drm/drm.h>
#include <linux/ioctl.h>

/*
 * - 1.1 - initial version
 * - 1.3 - Add SMI events support
 * - 1.4 - Indicate new SRAM EDC bit in device properties
 * - 1.5 - Add SVM API
 * - 1.6 - Query clear flags in SVM get_attr API
 * - 1.7 - Checkpoint Restore (CRIU) API
 * - 1.8 - CRIU - Support for SDMA transfers with GTT BOs
 * - 1.9 - Add available memory ioctl
 * - 1.10 - Add SMI profiler event log
 * - 1.11 - Add unified memory for ctx save/restore area
 * - 1.12 - Add DMA buf export ioctl
 * - 1.13 - Add debugger API
 * - 1.14 - Update kfd_event_data
 * - 1.15 - Enable managing mappings in compute VMs with GEM_VA ioctl
 * - 1.16 - Add contiguous VRAM allocation flag
 * - 1.17 - Add SDMA queue creation with target SDMA engine ID
 */
#define KFD_IOCTL_MAJOR_VERSION 1
#define KFD_IOCTL_MINOR_VERSION 17

struct kfd_ioctl_get_version_args {
        __u32 major_version;    /* from KFD */
        __u32 minor_version;    /* from KFD */
};
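/*
 * Illustrative sketch (not part of the ABI): querying the interface version
 * from user space. The /dev/kfd device node is an assumption of this sketch.
 *
 *      #include <fcntl.h>
 *      #include <stdio.h>
 *      #include <unistd.h>
 *      #include <sys/ioctl.h>
 *      #include "kfd_ioctl.h"
 *
 *      int kfd_print_version(void)
 *      {
 *              struct kfd_ioctl_get_version_args args = {0};
 *              int fd = open("/dev/kfd", O_RDWR | O_CLOEXEC);
 *
 *              if (fd < 0 || ioctl(fd, AMDKFD_IOC_GET_VERSION, &args))
 *                      return -1;
 *              printf("KFD ioctl version %u.%u\n",
 *                     args.major_version, args.minor_version);
 *              close(fd);
 *              return 0;
 *      }
 */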
/* For kfd_ioctl_create_queue_args.queue_type. */
#define KFD_IOC_QUEUE_TYPE_COMPUTE              0x0
#define KFD_IOC_QUEUE_TYPE_SDMA                 0x1
#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL          0x2
#define KFD_IOC_QUEUE_TYPE_SDMA_XGMI            0x3
#define KFD_IOC_QUEUE_TYPE_SDMA_BY_ENG_ID       0x4

#define KFD_MAX_QUEUE_PERCENTAGE        100
#define KFD_MAX_QUEUE_PRIORITY          15

#define KFD_MIN_QUEUE_RING_SIZE         1024

struct kfd_ioctl_create_queue_args {
        __u64 ring_base_address;        /* to KFD */
        __u64 write_pointer_address;    /* from KFD */
        __u64 read_pointer_address;     /* from KFD */
        __u64 doorbell_offset;          /* from KFD */

        __u32 ring_size;                /* to KFD */
        __u32 gpu_id;                   /* to KFD */
        __u32 queue_type;               /* to KFD */
        __u32 queue_percentage;         /* to KFD */
        __u32 queue_priority;           /* to KFD */
        __u32 queue_id;                 /* from KFD */

        __u64 eop_buffer_address;       /* to KFD */
        __u64 eop_buffer_size;          /* to KFD */
        __u64 ctx_save_restore_address; /* to KFD */
        __u32 ctx_save_restore_size;    /* to KFD */
        __u32 ctl_stack_size;           /* to KFD */
        __u32 sdma_engine_id;           /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_destroy_queue_args {
        __u32 queue_id;                 /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_update_queue_args {
        __u64 ring_base_address;        /* to KFD */

        __u32 queue_id;                 /* to KFD */
        __u32 ring_size;                /* to KFD */
        __u32 queue_percentage;         /* to KFD */
        __u32 queue_priority;           /* to KFD */
};

struct kfd_ioctl_set_cu_mask_args {
        __u32 queue_id;                 /* to KFD */
        __u32 num_cu_mask;              /* to KFD */
        __u64 cu_mask_ptr;              /* to KFD */
};

struct kfd_ioctl_get_queue_wave_state_args {
        __u64 ctl_stack_address;        /* to KFD */
        __u32 ctl_stack_used_size;      /* from KFD */
        __u32 save_area_used_size;      /* from KFD */
        __u32 queue_id;                 /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_get_available_memory_args {
        __u64 available;        /* from KFD */
        __u32 gpu_id;           /* to KFD */
        __u32 pad;
};

struct kfd_dbg_device_info_entry {
        __u64 exception_status;
        __u64 lds_base;
        __u64 lds_limit;
        __u64 scratch_base;
        __u64 scratch_limit;
        __u64 gpuvm_base;
        __u64 gpuvm_limit;
        __u32 gpu_id;
        __u32 location_id;
        __u32 vendor_id;
        __u32 device_id;
        __u32 revision_id;
        __u32 subsystem_vendor_id;
        __u32 subsystem_device_id;
        __u32 fw_version;
        __u32 gfx_target_version;
        __u32 simd_count;
        __u32 max_waves_per_simd;
        __u32 array_count;
        __u32 simd_arrays_per_engine;
        __u32 num_xcc;
        __u32 capability;
        __u32 debug_prop;
};

/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
#define KFD_IOC_CACHE_POLICY_COHERENT 0
#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1

struct kfd_ioctl_set_memory_policy_args {
        __u64 alternate_aperture_base;  /* to KFD */
        __u64 alternate_aperture_size;  /* to KFD */

        __u32 gpu_id;                   /* to KFD */
        __u32 default_policy;           /* to KFD */
        __u32 alternate_policy;         /* to KFD */
        __u32 pad;
};
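/*
 * Illustrative sketch (not part of the ABI): creating a compute queue.
 * The ring buffer is user-allocated; kfd_fd, gpu_id and the chosen sizes
 * are assumptions of this sketch, and error handling is elided.
 *
 *      struct kfd_ioctl_create_queue_args args = {0};
 *
 *      args.gpu_id = gpu_id;
 *      args.queue_type = KFD_IOC_QUEUE_TYPE_COMPUTE;
 *      args.ring_base_address = (__u64)(uintptr_t)ring_buf;
 *      args.ring_size = KFD_MIN_QUEUE_RING_SIZE;       // in bytes
 *      args.queue_percentage = KFD_MAX_QUEUE_PERCENTAGE;
 *      args.queue_priority = KFD_MAX_QUEUE_PRIORITY / 2;
 *      if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_QUEUE, &args) == 0) {
 *              // args.queue_id, args.doorbell_offset and the read/write
 *              // pointer addresses are now valid (from KFD).
 *      }
 */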
/*
 * All counters are monotonic. They are used for profiling of compute jobs.
 * The profiling is done by userspace.
 *
 * In case of GPU reset, the counter should not be affected.
 */

struct kfd_ioctl_get_clock_counters_args {
        __u64 gpu_clock_counter;        /* from KFD */
        __u64 cpu_clock_counter;        /* from KFD */
        __u64 system_clock_counter;     /* from KFD */
        __u64 system_clock_freq;        /* from KFD */

        __u32 gpu_id;                   /* to KFD */
        __u32 pad;
};

struct kfd_process_device_apertures {
        __u64 lds_base;         /* from KFD */
        __u64 lds_limit;        /* from KFD */
        __u64 scratch_base;     /* from KFD */
        __u64 scratch_limit;    /* from KFD */
        __u64 gpuvm_base;       /* from KFD */
        __u64 gpuvm_limit;      /* from KFD */
        __u32 gpu_id;           /* from KFD */
        __u32 pad;
};

/*
 * AMDKFD_IOC_GET_PROCESS_APERTURES is deprecated. Use
 * AMDKFD_IOC_GET_PROCESS_APERTURES_NEW instead, which supports an
 * unlimited number of GPUs.
 */
#define NUM_OF_SUPPORTED_GPUS 7
struct kfd_ioctl_get_process_apertures_args {
        struct kfd_process_device_apertures
                        process_apertures[NUM_OF_SUPPORTED_GPUS];/* from KFD */

        /* from KFD, should be in the range [1 - NUM_OF_SUPPORTED_GPUS] */
        __u32 num_of_nodes;
        __u32 pad;
};

struct kfd_ioctl_get_process_apertures_new_args {
        /* User allocated. Pointer to struct kfd_process_device_apertures
         * filled in by Kernel
         */
        __u64 kfd_process_device_apertures_ptr;
        /* to KFD - indicates amount of memory present in
         *  kfd_process_device_apertures_ptr
         * from KFD - Number of entries filled by KFD.
         */
        __u32 num_of_nodes;
        __u32 pad;
};
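/*
 * Illustrative sketch (not part of the ABI): the two-call pattern for
 * AMDKFD_IOC_GET_PROCESS_APERTURES_NEW. That a first call with
 * num_of_nodes == 0 only reports the entry count is an assumption of this
 * sketch; error handling is elided.
 *
 *      struct kfd_ioctl_get_process_apertures_new_args args = {0};
 *      struct kfd_process_device_apertures *bufp;
 *
 *      // First call: KFD reports how many entries it would fill.
 *      ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args);
 *      bufp = calloc(args.num_of_nodes, sizeof(*bufp));
 *      args.kfd_process_device_apertures_ptr = (__u64)(uintptr_t)bufp;
 *      // Second call: KFD fills num_of_nodes entries.
 *      ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args);
 */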
#define MAX_ALLOWED_NUM_POINTS    100
#define MAX_ALLOWED_AW_BUFF_SIZE  4096
#define MAX_ALLOWED_WAC_BUFF_SIZE 128

struct kfd_ioctl_dbg_register_args {
        __u32 gpu_id;           /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_dbg_unregister_args {
        __u32 gpu_id;           /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_dbg_address_watch_args {
        __u64 content_ptr;              /* a pointer to the actual content */
        __u32 gpu_id;                   /* to KFD */
        __u32 buf_size_in_bytes;        /* including gpu_id and buf_size */
};

struct kfd_ioctl_dbg_wave_control_args {
        __u64 content_ptr;              /* a pointer to the actual content */
        __u32 gpu_id;                   /* to KFD */
        __u32 buf_size_in_bytes;        /* including gpu_id and buf_size */
};

#define KFD_INVALID_FD          0xffffffff

/* Matching HSA_EVENTTYPE */
#define KFD_IOC_EVENT_SIGNAL                    0
#define KFD_IOC_EVENT_NODECHANGE                1
#define KFD_IOC_EVENT_DEVICESTATECHANGE         2
#define KFD_IOC_EVENT_HW_EXCEPTION              3
#define KFD_IOC_EVENT_SYSTEM_EVENT              4
#define KFD_IOC_EVENT_DEBUG_EVENT               5
#define KFD_IOC_EVENT_PROFILE_EVENT             6
#define KFD_IOC_EVENT_QUEUE_EVENT               7
#define KFD_IOC_EVENT_MEMORY                    8

#define KFD_IOC_WAIT_RESULT_COMPLETE            0
#define KFD_IOC_WAIT_RESULT_TIMEOUT             1
#define KFD_IOC_WAIT_RESULT_FAIL                2

#define KFD_SIGNAL_EVENT_LIMIT                  4096

/* For kfd_event_data.hw_exception_data.reset_type. */
#define KFD_HW_EXCEPTION_WHOLE_GPU_RESET        0
#define KFD_HW_EXCEPTION_PER_ENGINE_RESET       1

/* For kfd_event_data.hw_exception_data.reset_cause. */
#define KFD_HW_EXCEPTION_GPU_HANG       0
#define KFD_HW_EXCEPTION_ECC            1

/* For kfd_hsa_memory_exception_data.ErrorType */
#define KFD_MEM_ERR_NO_RAS              0
#define KFD_MEM_ERR_SRAM_ECC            1
#define KFD_MEM_ERR_POISON_CONSUMED     2
#define KFD_MEM_ERR_GPU_HANG            3

struct kfd_ioctl_create_event_args {
        __u64 event_page_offset;        /* from KFD */
        __u32 event_trigger_data;       /* from KFD - signal events only */
        __u32 event_type;               /* to KFD */
        __u32 auto_reset;               /* to KFD */
        __u32 node_id;                  /* to KFD - only valid for certain
                                           event types */
        __u32 event_id;                 /* from KFD */
        __u32 event_slot_index;         /* from KFD */
};

struct kfd_ioctl_destroy_event_args {
        __u32 event_id;         /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_set_event_args {
        __u32 event_id;         /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_reset_event_args {
        __u32 event_id;         /* to KFD */
        __u32 pad;
};

struct kfd_memory_exception_failure {
        __u32 NotPresent;       /* Page not present or supervisor privilege */
        __u32 ReadOnly;         /* Write access to a read-only page */
        __u32 NoExecute;        /* Execute access to a page marked NX */
        __u32 imprecise;        /* Can't determine the exact fault address */
};

/* memory exception data */
struct kfd_hsa_memory_exception_data {
        struct kfd_memory_exception_failure failure;
        __u64 va;
        __u32 gpu_id;
        __u32 ErrorType;        /* 0 = no RAS error,
                                 * 1 = ECC_SRAM,
                                 * 2 = Link_SYNFLOOD (poison),
                                 * 3 = GPU hang (not attributable to a specific cause),
                                 * other values reserved
                                 */
};

/* hw exception data */
struct kfd_hsa_hw_exception_data {
        __u32 reset_type;
        __u32 reset_cause;
        __u32 memory_lost;
        __u32 gpu_id;
};

/* hsa signal event data */
struct kfd_hsa_signal_event_data {
        __u64 last_event_age;   /* to and from KFD */
};

/* Event data */
struct kfd_event_data {
        union {
                /* From KFD */
                struct kfd_hsa_memory_exception_data memory_exception_data;
                struct kfd_hsa_hw_exception_data hw_exception_data;
                /* To and From KFD */
                struct kfd_hsa_signal_event_data signal_event_data;
        };
        __u64 kfd_event_data_ext;       /* pointer to an extension structure
                                           for future exception types */
        __u32 event_id;                 /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_wait_events_args {
        __u64 events_ptr;       /* pointer to struct
                                   kfd_event_data array, to KFD */
        __u32 num_events;       /* to KFD */
        __u32 wait_for_all;     /* to KFD */
        __u32 timeout;          /* to KFD */
        __u32 wait_result;      /* from KFD */
};
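/*
 * Illustrative sketch (not part of the ABI): creating a signal event and
 * waiting on it. The timeout value is an assumption of this sketch.
 *
 *      struct kfd_ioctl_create_event_args ev = {0};
 *      struct kfd_event_data data = {0};
 *      struct kfd_ioctl_wait_events_args wait = {0};
 *
 *      ev.event_type = KFD_IOC_EVENT_SIGNAL;
 *      ev.auto_reset = 1;
 *      ioctl(kfd_fd, AMDKFD_IOC_CREATE_EVENT, &ev);
 *
 *      data.event_id = ev.event_id;
 *      wait.events_ptr = (__u64)(uintptr_t)&data;
 *      wait.num_events = 1;
 *      wait.wait_for_all = 1;
 *      wait.timeout = 1000;    // milliseconds, assumed unit
 *      ioctl(kfd_fd, AMDKFD_IOC_WAIT_EVENTS, &wait);
 *      // wait.wait_result is KFD_IOC_WAIT_RESULT_COMPLETE on signal.
 */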
struct kfd_ioctl_set_scratch_backing_va_args {
        __u64 va_addr;  /* to KFD */
        __u32 gpu_id;   /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_get_tile_config_args {
        /* to KFD: pointer to tile array */
        __u64 tile_config_ptr;
        /* to KFD: pointer to macro tile array */
        __u64 macro_tile_config_ptr;
        /* to KFD: array size allocated by user mode
         * from KFD: array size filled by kernel
         */
        __u32 num_tile_configs;
        /* to KFD: array size allocated by user mode
         * from KFD: array size filled by kernel
         */
        __u32 num_macro_tile_configs;

        __u32 gpu_id;           /* to KFD */
        __u32 gb_addr_config;   /* from KFD */
        __u32 num_banks;        /* from KFD */
        __u32 num_ranks;        /* from KFD */
        /* struct size can be extended later if needed
         * without breaking ABI compatibility
         */
};

struct kfd_ioctl_set_trap_handler_args {
        __u64 tba_addr;         /* to KFD */
        __u64 tma_addr;         /* to KFD */
        __u32 gpu_id;           /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_acquire_vm_args {
        __u32 drm_fd;   /* to KFD */
        __u32 gpu_id;   /* to KFD */
};

/* Allocation flags: memory types */
#define KFD_IOC_ALLOC_MEM_FLAGS_VRAM            (1 << 0)
#define KFD_IOC_ALLOC_MEM_FLAGS_GTT             (1 << 1)
#define KFD_IOC_ALLOC_MEM_FLAGS_USERPTR         (1 << 2)
#define KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL        (1 << 3)
#define KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP      (1 << 4)
/* Allocation flags: attributes/access options */
#define KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE        (1 << 31)
#define KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE      (1 << 30)
#define KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC          (1 << 29)
#define KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE   (1 << 28)
#define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM   (1 << 27)
#define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT        (1 << 26)
#define KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED        (1 << 25)
#define KFD_IOC_ALLOC_MEM_FLAGS_EXT_COHERENT    (1 << 24)
#define KFD_IOC_ALLOC_MEM_FLAGS_CONTIGUOUS      (1 << 23)

/* Allocate memory for later SVM (shared virtual memory) mapping.
 *
 * @va_addr:     virtual address of the memory to be allocated
 *               all later mappings on all GPUs will use this address
 * @size:        size in bytes
 * @handle:      buffer handle returned to user mode, used to refer to
 *               this allocation for mapping, unmapping and freeing
 * @mmap_offset: for CPU-mapping the allocation by mmapping a render node
 *               for userptrs this is overloaded to specify the CPU address
 * @gpu_id:      device identifier
 * @flags:       memory type and attributes. See KFD_IOC_ALLOC_MEM_FLAGS above
 */
struct kfd_ioctl_alloc_memory_of_gpu_args {
        __u64 va_addr;          /* to KFD */
        __u64 size;             /* to KFD */
        __u64 handle;           /* from KFD */
        __u64 mmap_offset;      /* to KFD (userptr), from KFD (mmap offset) */
        __u32 gpu_id;           /* to KFD */
        __u32 flags;
};

/* Free memory allocated with kfd_ioctl_alloc_memory_of_gpu
 *
 * @handle: memory handle returned by alloc
 */
struct kfd_ioctl_free_memory_of_gpu_args {
        __u64 handle;           /* to KFD */
};

/* Map memory to one or more GPUs
 *
 * @handle:                memory handle returned by alloc
 * @device_ids_array_ptr:  array of gpu_ids (__u32 per device)
 * @n_devices:             number of devices in the array
 * @n_success:             number of devices mapped successfully
 *
 * @n_success returns information to the caller how many devices from
 * the start of the array have mapped the buffer successfully. It can
 * be passed into a subsequent retry call to skip those devices. For
 * the first call the caller should initialize it to 0.
 *
 * If the ioctl completes with return code 0 (success), n_success ==
 * n_devices.
 */
struct kfd_ioctl_map_memory_to_gpu_args {
        __u64 handle;                   /* to KFD */
        __u64 device_ids_array_ptr;     /* to KFD */
        __u32 n_devices;                /* to KFD */
        __u32 n_success;                /* to/from KFD */
};

/* Unmap memory from one or more GPUs
 *
 * same arguments as for mapping
 */
struct kfd_ioctl_unmap_memory_from_gpu_args {
        __u64 handle;                   /* to KFD */
        __u64 device_ids_array_ptr;     /* to KFD */
        __u32 n_devices;                /* to KFD */
        __u32 n_success;                /* to/from KFD */
};
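/*
 * Illustrative sketch (not part of the ABI): allocating VRAM and mapping
 * it on one GPU, using n_success as described above. kfd_fd, gpu_id, va
 * and size are assumptions of this sketch.
 *
 *      struct kfd_ioctl_alloc_memory_of_gpu_args alloc = {0};
 *      struct kfd_ioctl_map_memory_to_gpu_args map = {0};
 *      __u32 gpus[] = { gpu_id };
 *
 *      alloc.va_addr = va;     // chosen by the user-mode allocator
 *      alloc.size = size;
 *      alloc.gpu_id = gpu_id;
 *      alloc.flags = KFD_IOC_ALLOC_MEM_FLAGS_VRAM |
 *                    KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE;
 *      ioctl(kfd_fd, AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, &alloc);
 *
 *      map.handle = alloc.handle;
 *      map.device_ids_array_ptr = (__u64)(uintptr_t)gpus;
 *      map.n_devices = 1;
 *      map.n_success = 0;      // must be 0 on the first call
 *      if (ioctl(kfd_fd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &map)) {
 *              // On failure, map.n_success devices at the start of the
 *              // array are already mapped; a retry can pass the value
 *              // back in to skip them.
 *      }
 */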
/* Allocate GWS for specific queue
 *
 * @queue_id:  queue's id that GWS is allocated for
 * @num_gws:   how many GWS to allocate
 * @first_gws: index of the first GWS allocated.
 *             only contiguous GWS allocation is supported
 */
struct kfd_ioctl_alloc_queue_gws_args {
        __u32 queue_id;         /* to KFD */
        __u32 num_gws;          /* to KFD */
        __u32 first_gws;        /* from KFD */
        __u32 pad;
};

struct kfd_ioctl_get_dmabuf_info_args {
        __u64 size;             /* from KFD */
        __u64 metadata_ptr;     /* to KFD */
        __u32 metadata_size;    /* to KFD (space allocated by user)
                                 * from KFD (actual metadata size)
                                 */
        __u32 gpu_id;           /* from KFD */
        __u32 flags;            /* from KFD (KFD_IOC_ALLOC_MEM_FLAGS) */
        __u32 dmabuf_fd;        /* to KFD */
};

struct kfd_ioctl_import_dmabuf_args {
        __u64 va_addr;          /* to KFD */
        __u64 handle;           /* from KFD */
        __u32 gpu_id;           /* to KFD */
        __u32 dmabuf_fd;        /* to KFD */
};

struct kfd_ioctl_export_dmabuf_args {
        __u64 handle;           /* to KFD */
        __u32 flags;            /* to KFD */
        __u32 dmabuf_fd;        /* from KFD */
};

/*
 * KFD SMI (System Management Interface) events
 */
enum kfd_smi_event {
        KFD_SMI_EVENT_NONE = 0,         /* not used */
        KFD_SMI_EVENT_VMFAULT = 1,      /* events start counting at 1 */
        KFD_SMI_EVENT_THERMAL_THROTTLE = 2,
        KFD_SMI_EVENT_GPU_PRE_RESET = 3,
        KFD_SMI_EVENT_GPU_POST_RESET = 4,
        KFD_SMI_EVENT_MIGRATE_START = 5,
        KFD_SMI_EVENT_MIGRATE_END = 6,
        KFD_SMI_EVENT_PAGE_FAULT_START = 7,
        KFD_SMI_EVENT_PAGE_FAULT_END = 8,
        KFD_SMI_EVENT_QUEUE_EVICTION = 9,
        KFD_SMI_EVENT_QUEUE_RESTORE = 10,
        KFD_SMI_EVENT_UNMAP_FROM_GPU = 11,

        /*
         * Max event number, used as a flag bit to receive events from
         * all processes. This requires super user permission; without it,
         * no events from any process will be received. Without this flag,
         * only events from the same process are received.
         */
        KFD_SMI_EVENT_ALL_PROCESS = 64
};

/* The reason for the page migration event */
enum KFD_MIGRATE_TRIGGERS {
        KFD_MIGRATE_TRIGGER_PREFETCH,           /* Prefetch to GPU VRAM or system memory */
        KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,      /* GPU page fault recovery */
        KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,      /* CPU page fault recovery */
        KFD_MIGRATE_TRIGGER_TTM_EVICTION        /* TTM eviction */
};

/* The reason for the user queue eviction event */
enum KFD_QUEUE_EVICTION_TRIGGERS {
        KFD_QUEUE_EVICTION_TRIGGER_SVM,         /* SVM buffer migration */
        KFD_QUEUE_EVICTION_TRIGGER_USERPTR,     /* userptr movement */
        KFD_QUEUE_EVICTION_TRIGGER_TTM,         /* TTM move buffer */
        KFD_QUEUE_EVICTION_TRIGGER_SUSPEND,     /* GPU suspend */
        KFD_QUEUE_EVICTION_CRIU_CHECKPOINT,     /* CRIU checkpoint */
        KFD_QUEUE_EVICTION_CRIU_RESTORE         /* CRIU restore */
};

/* The reason for the unmap-buffer-from-GPU event */
enum KFD_SVM_UNMAP_TRIGGERS {
        KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY,       /* MMU notifier CPU buffer movement */
        KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE,/* MMU notifier page migration */
        KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU    /* Unmap to free the buffer */
};

#define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1))
#define KFD_SMI_EVENT_MSG_SIZE  96

struct kfd_ioctl_smi_events_args {
        __u32 gpuid;    /* to KFD */
        __u32 anon_fd;  /* from KFD */
};

/*
 * SVM event tracing via SMI system management interface
 *
 * Open event file descriptor
 *    Use ioctl AMDKFD_IOC_SMI_EVENTS, pass in the gpuid, and receive an
 *    anonymous file descriptor for SMI events.
 *    If called with sudo permission, the file descriptor can be used to
 *    receive SVM events from all processes; otherwise it only receives SVM
 *    events of the same process.
 *
 * To enable SVM events
 *    Write a KFD_SMI_EVENT_MASK_FROM_INDEX(event) bitmap mask to the event
 *    file descriptor to start recording the event into the kfifo; combine
 *    bitmap masks for multiple events. A new event mask overwrites the
 *    previous event mask. The
 *    KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_ALL_PROCESS) bit requires
 *    sudo permission to receive SVM events from all processes.
 *
 * To receive events
 *    The application can poll the file descriptor to wait for events, then
 *    read events from the file into a buffer. Each event is a one-line
 *    string message, starting with the event id, followed by the
 *    event-specific information.
 *
 * To decode event information
 *    The following event format string macros can be used with sscanf to
 *    decode the event-specific information.
 *    event triggers: the reason for generating the event, defined as enums
 *                    for unmap, eviction and migrate events.
 *    node, from, to, prefetch_loc, preferred_loc: GPU ID, or 0 for system
 *                    memory.
 *    addr: user mode address, in pages
 *    size: in pages
 *    pid:  the process ID that generated the event
 *    ns:   timestamp in nanosecond resolution, starts at system boot time
 *          but stops during suspend
 *    migrate_update: GPU page fault is recovered by 'M' for migrate, 'U'
 *                    for update
 *    rw:   'W' for write page fault, 'R' for read page fault
 *    rescheduled: 'R' if the queue restore failed and was rescheduled to
 *                 try again
 */
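/*
 * Illustrative sketch (not part of the ABI): enabling and reading SMI
 * events as described above. Writing the mask as a binary __u64 and the
 * poll timeout are assumptions of this sketch.
 *
 *      struct kfd_ioctl_smi_events_args args = { .gpuid = gpu_id };
 *      __u64 mask = KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_VMFAULT);
 *      char buf[KFD_SMI_EVENT_MSG_SIZE];
 *      struct pollfd pfd;
 *
 *      ioctl(kfd_fd, AMDKFD_IOC_SMI_EVENTS, &args);
 *      write(args.anon_fd, &mask, sizeof(mask));  // enable VM fault events
 *
 *      pfd.fd = args.anon_fd;
 *      pfd.events = POLLIN;
 *      if (poll(&pfd, 1, -1) > 0)
 *              read(args.anon_fd, buf, sizeof(buf));  // one event per line
 */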
#define KFD_EVENT_FMT_UPDATE_GPU_RESET(reset_seq_num, reset_cause)\
                "%x %s\n", (reset_seq_num), (reset_cause)

#define KFD_EVENT_FMT_THERMAL_THROTTLING(bitmask, counter)\
                "%llx:%llx\n", (bitmask), (counter)

#define KFD_EVENT_FMT_VMFAULT(pid, task_name)\
                "%x:%s\n", (pid), (task_name)

#define KFD_EVENT_FMT_PAGEFAULT_START(ns, pid, addr, node, rw)\
                "%lld -%d @%lx(%x) %c\n", (ns), (pid), (addr), (node), (rw)

#define KFD_EVENT_FMT_PAGEFAULT_END(ns, pid, addr, node, migrate_update)\
                "%lld -%d @%lx(%x) %c\n", (ns), (pid), (addr), (node), (migrate_update)

#define KFD_EVENT_FMT_MIGRATE_START(ns, pid, start, size, from, to, prefetch_loc,\
                preferred_loc, migrate_trigger)\
                "%lld -%d @%lx(%lx) %x->%x %x:%x %d\n", (ns), (pid), (start), (size),\
                (from), (to), (prefetch_loc), (preferred_loc), (migrate_trigger)

#define KFD_EVENT_FMT_MIGRATE_END(ns, pid, start, size, from, to, migrate_trigger)\
                "%lld -%d @%lx(%lx) %x->%x %d\n", (ns), (pid), (start), (size),\
                (from), (to), (migrate_trigger)

#define KFD_EVENT_FMT_QUEUE_EVICTION(ns, pid, node, evict_trigger)\
                "%lld -%d %x %d\n", (ns), (pid), (node), (evict_trigger)

#define KFD_EVENT_FMT_QUEUE_RESTORE(ns, pid, node, rescheduled)\
                "%lld -%d %x %c\n", (ns), (pid), (node), (rescheduled)

#define KFD_EVENT_FMT_UNMAP_FROM_GPU(ns, pid, addr, size, node, unmap_trigger)\
                "%lld -%d @%lx(%lx) %x %d\n", (ns), (pid), (addr), (size),\
                (node), (unmap_trigger)
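/*
 * Illustrative sketch (not part of the ABI): decoding a page-fault-start
 * event line with sscanf and the matching format macro. The "event id,
 * then a space, then the payload" line layout is taken from the
 * description above; the parsing details are assumptions of this sketch.
 *
 *      unsigned int event_id;
 *      long long ns;
 *      int pid;
 *      unsigned long addr;
 *      unsigned int node;
 *      char rw;
 *
 *      sscanf(buf, "%x", &event_id);
 *      if (event_id == KFD_SMI_EVENT_PAGE_FAULT_START)
 *              sscanf(strchr(buf, ' ') + 1,
 *                     KFD_EVENT_FMT_PAGEFAULT_START(&ns, &pid, &addr,
 *                                                   &node, &rw));
 */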
/**************************************************************************************************
 * CRIU IOCTLs (Checkpoint Restore In Userspace)
 *
 * When checkpointing a process, the userspace application will perform:
 * 1. PROCESS_INFO op to determine current process information. This pauses
 *    execution and evicts all the queues.
 * 2. CHECKPOINT op to checkpoint process contents (BOs, queues, events,
 *    svm-ranges)
 * 3. UNPAUSE op to un-evict all the queues
 *
 * When restoring a process, the CRIU userspace application will perform:
 *
 * 1. RESTORE op to restore process contents
 * 2. RESUME op to start the process
 *
 * Note: Queues are forced into an evicted state after a successful
 * PROCESS_INFO. The user application needs to perform an UNPAUSE operation
 * after calling PROCESS_INFO.
 */

enum kfd_criu_op {
        KFD_CRIU_OP_PROCESS_INFO,
        KFD_CRIU_OP_CHECKPOINT,
        KFD_CRIU_OP_UNPAUSE,
        KFD_CRIU_OP_RESTORE,
        KFD_CRIU_OP_RESUME,
};

/**
 * kfd_ioctl_criu_args - Arguments to perform CRIU operation
 * @devices:        [in/out] User pointer to memory location for devices
 *                  information. This is an array of type kfd_criu_device_bucket.
 * @bos:            [in/out] User pointer to memory location for BOs
 *                  information. This is an array of type kfd_criu_bo_bucket.
 * @priv_data:      [in/out] User pointer to memory location for private data
 * @priv_data_size: [in/out] Size of priv_data in bytes
 * @num_devices:    [in/out] Number of GPUs used by process. Size of @devices array.
 * @num_bos:        [in/out] Number of BOs used by process. Size of @bos array.
 * @num_objects:    [in/out] Number of objects used by process. Objects are
 *                  opaque to user application.
 * @pid:            [in/out] PID of the process being checkpointed
 * @op:             [in] Type of operation (kfd_criu_op)
 *
 * Return: 0 on success, -errno on failure
 */
struct kfd_ioctl_criu_args {
        __u64 devices;          /* Used during ops: CHECKPOINT, RESTORE */
        __u64 bos;              /* Used during ops: CHECKPOINT, RESTORE */
        __u64 priv_data;        /* Used during ops: CHECKPOINT, RESTORE */
        __u64 priv_data_size;   /* Used during ops: PROCESS_INFO, RESTORE */
        __u32 num_devices;      /* Used during ops: PROCESS_INFO, RESTORE */
        __u32 num_bos;          /* Used during ops: PROCESS_INFO, RESTORE */
        __u32 num_objects;      /* Used during ops: PROCESS_INFO, RESTORE */
        __u32 pid;              /* Used during ops: PROCESS_INFO, RESUME */
        __u32 op;
};

struct kfd_criu_device_bucket {
        __u32 user_gpu_id;
        __u32 actual_gpu_id;
        __u32 drm_fd;
        __u32 pad;
};

struct kfd_criu_bo_bucket {
        __u64 addr;
        __u64 size;
        __u64 offset;
        __u64 restored_offset;  /* During restore, updated offset for BO */
        __u32 gpu_id;           /* This is the user_gpu_id */
        __u32 alloc_flags;
        __u32 dmabuf_fd;
        __u32 pad;
};

/* CRIU IOCTLs - END */
/**************************************************************************************************/
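/*
 * Illustrative sketch (not part of the ABI): the checkpoint sequence
 * described above. Buffer allocation and error handling are elided.
 *
 *      struct kfd_ioctl_criu_args args = {0};
 *
 *      args.op = KFD_CRIU_OP_PROCESS_INFO;
 *      args.pid = target_pid;
 *      ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args);
 *      // args.num_devices/num_bos/num_objects/priv_data_size now
 *      // describe how much memory CHECKPOINT needs. Queues are evicted
 *      // from this point on.
 *
 *      // ... allocate the devices, bos and priv_data buffers and store
 *      // their pointers in args accordingly ...
 *
 *      args.op = KFD_CRIU_OP_CHECKPOINT;
 *      ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args);
 *
 *      args.op = KFD_CRIU_OP_UNPAUSE;
 *      ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args);   // un-evict the queues
 */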
/* Register offset inside the remapped mmio page */
enum kfd_mmio_remap {
        KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL = 0,
        KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL = 4,
};

/* Guarantee host access to memory */
#define KFD_IOCTL_SVM_FLAG_HOST_ACCESS          0x00000001
/* Fine grained coherency between all devices with access */
#define KFD_IOCTL_SVM_FLAG_COHERENT             0x00000002
/* Use any GPU in same hive as preferred device */
#define KFD_IOCTL_SVM_FLAG_HIVE_LOCAL           0x00000004
/* GPUs only read, allows replication */
#define KFD_IOCTL_SVM_FLAG_GPU_RO               0x00000008
/* Allow execution on GPU */
#define KFD_IOCTL_SVM_FLAG_GPU_EXEC             0x00000010
/* GPUs mostly read, may allow similar optimizations as RO, but writes fault */
#define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY      0x00000020
/* Keep GPU memory mapping always valid as if XNACK is disabled */
#define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED    0x00000040
/* Fine grained coherency between all devices using device-scope atomics */
#define KFD_IOCTL_SVM_FLAG_EXT_COHERENT         0x00000080

/**
 * kfd_ioctl_svm_op - SVM ioctl operations
 *
 * @KFD_IOCTL_SVM_OP_SET_ATTR: Modify one or more attributes
 * @KFD_IOCTL_SVM_OP_GET_ATTR: Query one or more attributes
 */
enum kfd_ioctl_svm_op {
        KFD_IOCTL_SVM_OP_SET_ATTR,
        KFD_IOCTL_SVM_OP_GET_ATTR
};

/** kfd_ioctl_svm_location - Enum for preferred and prefetch locations
 *
 * GPU IDs are used to specify GPUs as preferred and prefetch locations.
 * Below definitions are used for system memory or for leaving the preferred
 * location unspecified.
 */
enum kfd_ioctl_svm_location {
        KFD_IOCTL_SVM_LOCATION_SYSMEM = 0,
        KFD_IOCTL_SVM_LOCATION_UNDEFINED = 0xffffffff
};

/**
 * kfd_ioctl_svm_attr_type - SVM attribute types
 *
 * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: gpuid of the preferred location, 0 for
 *                                    system memory
 * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: gpuid of the prefetch location, 0 for
 *                                   system memory. Setting this triggers an
 *                                   immediate prefetch (migration).
 * @KFD_IOCTL_SVM_ATTR_ACCESS:
 * @KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
 * @KFD_IOCTL_SVM_ATTR_NO_ACCESS: specify memory access for the gpuid given
 *                                by the attribute value
 * @KFD_IOCTL_SVM_ATTR_SET_FLAGS: bitmask of flags to set (see
 *                                KFD_IOCTL_SVM_FLAG_...)
 * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS: bitmask of flags to clear
 * @KFD_IOCTL_SVM_ATTR_GRANULARITY: migration granularity
 *                                  (log2 num pages)
 */
enum kfd_ioctl_svm_attr_type {
        KFD_IOCTL_SVM_ATTR_PREFERRED_LOC,
        KFD_IOCTL_SVM_ATTR_PREFETCH_LOC,
        KFD_IOCTL_SVM_ATTR_ACCESS,
        KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE,
        KFD_IOCTL_SVM_ATTR_NO_ACCESS,
        KFD_IOCTL_SVM_ATTR_SET_FLAGS,
        KFD_IOCTL_SVM_ATTR_CLR_FLAGS,
        KFD_IOCTL_SVM_ATTR_GRANULARITY
};

/**
 * kfd_ioctl_svm_attribute - Attributes as pairs of type and value
 *
 * The meaning of the @value depends on the attribute type.
 *
 * @type: attribute type (see enum @kfd_ioctl_svm_attr_type)
 * @value: attribute value
 */
struct kfd_ioctl_svm_attribute {
        __u32 type;
        __u32 value;
};
/**
 * kfd_ioctl_svm_args - Arguments for SVM ioctl
 *
 * @op specifies the operation to perform (see enum
 * @kfd_ioctl_svm_op). @start_addr and @size are common for all
 * operations.
 *
 * A variable number of attributes can be given in @attrs.
 * @nattr specifies the number of attributes. New attributes can be
 * added in the future without breaking the ABI. If unknown attributes
 * are given, the function returns -EINVAL.
 *
 * @KFD_IOCTL_SVM_OP_SET_ATTR sets attributes for a virtual address
 * range. It may overlap existing virtual address ranges. If it does,
 * the existing ranges will be split such that the attribute changes
 * only apply to the specified address range.
 *
 * @KFD_IOCTL_SVM_OP_GET_ATTR returns the intersection of attributes
 * over all memory in the given range and returns the result as the
 * attribute value. If different pages have different preferred or
 * prefetch locations, 0xffffffff will be returned for
 * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC or
 * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC respectively. For
 * @KFD_IOCTL_SVM_ATTR_SET_FLAGS, flags of all pages will be
 * aggregated by bitwise AND. That means a flag will be set in the
 * output if that flag is set for all pages in the range. For
 * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS, flags of all pages will be
 * aggregated by bitwise NOR. That means a flag will be set in the
 * output if that flag is clear for all pages in the range.
 * The minimum migration granularity throughout the range will be
 * returned for @KFD_IOCTL_SVM_ATTR_GRANULARITY.
 *
 * Querying of accessibility attributes works by initializing the
 * attribute type to @KFD_IOCTL_SVM_ATTR_ACCESS and the value to the
 * GPUID being queried. Multiple attributes can be given to allow
 * querying multiple GPUIDs. The ioctl function overwrites the
 * attribute type to indicate the access for the specified GPU.
 */
struct kfd_ioctl_svm_args {
        __u64 start_addr;
        __u64 size;
        __u32 op;
        __u32 nattr;
        /* Variable length array of attributes */
        struct kfd_ioctl_svm_attribute attrs[];
};
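/*
 * Illustrative sketch (not part of the ABI): prefetching an SVM range to a
 * GPU with a variable-length attribute list. base, range_size, gpu_id and
 * kfd_fd are assumptions of this sketch; error handling is elided.
 *
 *      struct kfd_ioctl_svm_args *args;
 *      size_t sz = sizeof(*args) +
 *                  2 * sizeof(struct kfd_ioctl_svm_attribute);
 *
 *      args = calloc(1, sz);
 *      args->start_addr = base;        // page aligned
 *      args->size = range_size;        // page aligned
 *      args->op = KFD_IOCTL_SVM_OP_SET_ATTR;
 *      args->nattr = 2;
 *      args->attrs[0].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
 *      args->attrs[0].value = gpu_id;  // triggers immediate migration
 *      args->attrs[1].type = KFD_IOCTL_SVM_ATTR_ACCESS;
 *      args->attrs[1].value = gpu_id;
 *      ioctl(kfd_fd, AMDKFD_IOC_SVM, args);
 */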
/**
 * kfd_ioctl_set_xnack_mode_args - Arguments for set_xnack_mode
 *
 * @xnack_enabled: [in/out] Whether to enable XNACK mode for this process
 *
 * @xnack_enabled indicates whether recoverable page faults should be
 * enabled for the current process. 0 means disabled, positive means
 * enabled, negative means leave unchanged. If enabled, virtual address
 * translations on GFXv9 and later AMD GPUs can return XNACK and retry
 * the access until a valid PTE is available. This is used to implement
 * device page faults.
 *
 * On output, @xnack_enabled returns the (new) current mode (0 or
 * positive). Therefore, a negative input value can be used to query
 * the current mode without changing it.
 *
 * The XNACK mode fundamentally changes the way SVM managed memory works
 * in the driver, with subtle effects on application performance and
 * functionality.
 *
 * Enabling XNACK mode requires shader programs to be compiled
 * differently. Furthermore, not all GPUs support changing the mode
 * per-process. Therefore changing the mode is only allowed while no
 * user mode queues exist in the process. This ensures that no shader
 * code is running that may have been compiled for the wrong mode. GPUs
 * that cannot change to the requested mode will prevent the mode
 * switch. All GPUs used by the process must be in the same XNACK mode.
 *
 * GFXv8 or older GPUs do not support 48 bit virtual addresses or SVM.
 * Therefore those GPUs are not considered for the XNACK mode switch.
 *
 * Return: 0 on success, -errno on failure
 */
struct kfd_ioctl_set_xnack_mode_args {
        __s32 xnack_enabled;
};
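/*
 * Illustrative sketch (not part of the ABI): querying the current XNACK
 * mode without changing it, then enabling it.
 *
 *      struct kfd_ioctl_set_xnack_mode_args args;
 *
 *      args.xnack_enabled = -1;        // negative input = query only
 *      ioctl(kfd_fd, AMDKFD_IOC_SET_XNACK_MODE, &args);
 *      // args.xnack_enabled now holds the current mode (0 or positive)
 *
 *      args.xnack_enabled = 1;         // request recoverable page faults
 *      ioctl(kfd_fd, AMDKFD_IOC_SET_XNACK_MODE, &args);
 */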
/* Wave launch override modes */
enum kfd_dbg_trap_override_mode {
        KFD_DBG_TRAP_OVERRIDE_OR = 0,
        KFD_DBG_TRAP_OVERRIDE_REPLACE = 1
};

/* Wave launch overrides */
enum kfd_dbg_trap_mask {
        KFD_DBG_TRAP_MASK_FP_INVALID = 1,
        KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL = 2,
        KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO = 4,
        KFD_DBG_TRAP_MASK_FP_OVERFLOW = 8,
        KFD_DBG_TRAP_MASK_FP_UNDERFLOW = 16,
        KFD_DBG_TRAP_MASK_FP_INEXACT = 32,
        KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO = 64,
        KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH = 128,
        KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION = 256,
        KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START = (1 << 30),
        KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END = (1 << 31)
};

/* Wave launch modes */
enum kfd_dbg_trap_wave_launch_mode {
        KFD_DBG_TRAP_WAVE_LAUNCH_MODE_NORMAL = 0,
        KFD_DBG_TRAP_WAVE_LAUNCH_MODE_HALT = 1,
        KFD_DBG_TRAP_WAVE_LAUNCH_MODE_DEBUG = 3
};

/* Address watch modes */
enum kfd_dbg_trap_address_watch_mode {
        KFD_DBG_TRAP_ADDRESS_WATCH_MODE_READ = 0,
        KFD_DBG_TRAP_ADDRESS_WATCH_MODE_NONREAD = 1,
        KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ATOMIC = 2,
        KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ALL = 3
};

/* Additional wave settings */
enum kfd_dbg_trap_flags {
        KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP = 1,
        KFD_DBG_TRAP_FLAG_SINGLE_ALU_OP = 2,
};

/* Trap exceptions */
enum kfd_dbg_trap_exception_code {
        EC_NONE = 0,
        /* per queue */
        EC_QUEUE_WAVE_ABORT = 1,
        EC_QUEUE_WAVE_TRAP = 2,
        EC_QUEUE_WAVE_MATH_ERROR = 3,
        EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION = 4,
        EC_QUEUE_WAVE_MEMORY_VIOLATION = 5,
        EC_QUEUE_WAVE_APERTURE_VIOLATION = 6,
        EC_QUEUE_PACKET_DISPATCH_DIM_INVALID = 16,
        EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID = 17,
        EC_QUEUE_PACKET_DISPATCH_CODE_INVALID = 18,
        EC_QUEUE_PACKET_RESERVED = 19,
        EC_QUEUE_PACKET_UNSUPPORTED = 20,
        EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID = 21,
        EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID = 22,
        EC_QUEUE_PACKET_VENDOR_UNSUPPORTED = 23,
        EC_QUEUE_PREEMPTION_ERROR = 30,
        EC_QUEUE_NEW = 31,
        /* per device */
        EC_DEVICE_QUEUE_DELETE = 32,
        EC_DEVICE_MEMORY_VIOLATION = 33,
        EC_DEVICE_RAS_ERROR = 34,
        EC_DEVICE_FATAL_HALT = 35,
        EC_DEVICE_NEW = 36,
        /* per process */
        EC_PROCESS_RUNTIME = 48,
        EC_PROCESS_DEVICE_REMOVE = 49,
        EC_MAX
};

/* Mask generated by ecode in kfd_dbg_trap_exception_code */
#define KFD_EC_MASK(ecode)      (1ULL << (ecode - 1))

/* Masks for exception code type checks below */
#define KFD_EC_MASK_QUEUE       (KFD_EC_MASK(EC_QUEUE_WAVE_ABORT) |     \
                                 KFD_EC_MASK(EC_QUEUE_WAVE_TRAP) |      \
                                 KFD_EC_MASK(EC_QUEUE_WAVE_MATH_ERROR) |        \
                                 KFD_EC_MASK(EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION) |       \
                                 KFD_EC_MASK(EC_QUEUE_WAVE_MEMORY_VIOLATION) |  \
                                 KFD_EC_MASK(EC_QUEUE_WAVE_APERTURE_VIOLATION) |        \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) |    \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) |     \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) |   \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) |        \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) |     \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) |        \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) |       \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED) |      \
                                 KFD_EC_MASK(EC_QUEUE_PREEMPTION_ERROR) |       \
                                 KFD_EC_MASK(EC_QUEUE_NEW))
#define KFD_EC_MASK_DEVICE      (KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE) |  \
                                 KFD_EC_MASK(EC_DEVICE_RAS_ERROR) |     \
                                 KFD_EC_MASK(EC_DEVICE_FATAL_HALT) |    \
                                 KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION) |      \
                                 KFD_EC_MASK(EC_DEVICE_NEW))
#define KFD_EC_MASK_PROCESS     (KFD_EC_MASK(EC_PROCESS_RUNTIME) |      \
                                 KFD_EC_MASK(EC_PROCESS_DEVICE_REMOVE))
#define KFD_EC_MASK_PACKET      (KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) |    \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) |     \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) |   \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) |        \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) |     \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) |        \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) |       \
                                 KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED))

/* Checks for exception code types for KFD search */
#define KFD_DBG_EC_IS_VALID(ecode) (ecode > EC_NONE && ecode < EC_MAX)
#define KFD_DBG_EC_TYPE_IS_QUEUE(ecode)                                 \
                (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE))
#define KFD_DBG_EC_TYPE_IS_DEVICE(ecode)                                \
                (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE))
#define KFD_DBG_EC_TYPE_IS_PROCESS(ecode)                               \
                (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS))
#define KFD_DBG_EC_TYPE_IS_PACKET(ecode)                                \
                (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PACKET))
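/*
 * Illustrative sketch (not part of the ABI): building an exception mask
 * and classifying a code reported back through the debug API.
 *
 *      __u64 mask = KFD_EC_MASK(EC_QUEUE_WAVE_TRAP) |
 *                   KFD_EC_MASK(EC_PROCESS_RUNTIME);
 *
 *      if (KFD_DBG_EC_TYPE_IS_QUEUE(ecode)) {
 *              // ecode identifies a per-queue exception; its bit within
 *              // an exception mask is KFD_EC_MASK(ecode)
 *      }
 */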
/* Runtime enable states */
enum kfd_dbg_runtime_state {
        DEBUG_RUNTIME_STATE_DISABLED = 0,
        DEBUG_RUNTIME_STATE_ENABLED = 1,
        DEBUG_RUNTIME_STATE_ENABLED_BUSY = 2,
        DEBUG_RUNTIME_STATE_ENABLED_ERROR = 3
};

/* Runtime enable status */
struct kfd_runtime_info {
        __u64 r_debug;
        __u32 runtime_state;
        __u32 ttmp_setup;
};

/* Enable modes for runtime enable */
#define KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK     1
#define KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK  2

/**
 * kfd_ioctl_runtime_enable_args - Arguments for runtime enable
 *
 * Coordinates debug exception signalling and debug device enablement with
 * runtime.
 *
 * @r_debug - pointer to user struct for sharing information between ROCr
 *            and the debugger
 * @mode_mask - mask to set mode
 *      KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK - enable runtime for debugging,
 *                                            otherwise disable
 *      KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK - enable trap temporary setup
 *                                               (ignore on disable)
 * @capabilities_mask - mask to notify runtime on what KFD supports
 *
 * Return - 0 on SUCCESS.
 *        - EBUSY if runtime enable call already pending.
 *        - EEXIST if user queues already active prior to call.
 *          If process is debug enabled, runtime enable will enable debug
 *          devices and wait for debugger process to send runtime exception
 *          EC_PROCESS_RUNTIME to unblock - see kfd_ioctl_dbg_trap_args.
 */
struct kfd_ioctl_runtime_enable_args {
        __u64 r_debug;
        __u32 mode_mask;
        __u32 capabilities_mask;
};

/* Queue information */
struct kfd_queue_snapshot_entry {
        __u64 exception_status;
        __u64 ring_base_address;
        __u64 write_pointer_address;
        __u64 read_pointer_address;
        __u64 ctx_save_restore_address;
        __u32 queue_id;
        __u32 gpu_id;
        __u32 ring_size;
        __u32 queue_type;
        __u32 ctx_save_restore_area_size;
        __u32 reserved;
};

/* Queue status return for suspend/resume */
#define KFD_DBG_QUEUE_ERROR_BIT         30
#define KFD_DBG_QUEUE_INVALID_BIT       31
#define KFD_DBG_QUEUE_ERROR_MASK        (1 << KFD_DBG_QUEUE_ERROR_BIT)
#define KFD_DBG_QUEUE_INVALID_MASK      (1 << KFD_DBG_QUEUE_INVALID_BIT)

/* Context save area header information */
struct kfd_context_save_area_header {
        struct {
                __u32 control_stack_offset;
                __u32 control_stack_size;
                __u32 wave_state_offset;
                __u32 wave_state_size;
        } wave_state;
        __u32 debug_offset;
        __u32 debug_size;
        __u64 err_payload_addr;
        __u32 err_event_id;
        __u32 reserved1;
};

/*
 * Debug operations
 *
 * For specifics on usage and return values, see documentation per operation
 * below. Otherwise, generic error returns apply:
 * - ESRCH if the process to debug does not exist.
 *
 * - EINVAL (with KFD_IOC_DBG_TRAP_ENABLE exempt) if operation
 *   KFD_IOC_DBG_TRAP_ENABLE has not succeeded prior.
 *   Also returns this error if GPU hardware scheduling is not supported.
 *
 * - EPERM (with KFD_IOC_DBG_TRAP_DISABLE exempt) if target process is not
 *   PTRACE_ATTACHED. KFD_IOC_DBG_TRAP_DISABLE is exempt to allow
 *   clean up of debug mode as long as process is debug enabled.
 *
 * - EACCES if any DBG_HW_OP (debug hardware operation) is requested when
 *   AMDKFD_IOC_RUNTIME_ENABLE has not succeeded prior.
 *
 * - ENODEV if any GPU does not support debugging on a DBG_HW_OP call.
 *
 * - Other errors may be returned when a DBG_HW_OP occurs while the GPU
 *   is in a fatal state.
 */
enum kfd_dbg_trap_operations {
        KFD_IOC_DBG_TRAP_ENABLE = 0,
        KFD_IOC_DBG_TRAP_DISABLE = 1,
        KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT = 2,
        KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED = 3,
        KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE = 4,  /* DBG_HW_OP */
        KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE = 5,      /* DBG_HW_OP */
        KFD_IOC_DBG_TRAP_SUSPEND_QUEUES = 6,            /* DBG_HW_OP */
        KFD_IOC_DBG_TRAP_RESUME_QUEUES = 7,             /* DBG_HW_OP */
        KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH = 8,    /* DBG_HW_OP */
        KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH = 9,  /* DBG_HW_OP */
        KFD_IOC_DBG_TRAP_SET_FLAGS = 10,
        KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT = 11,
        KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO = 12,
        KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT = 13,
        KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT = 14
};
/**
 * kfd_ioctl_dbg_trap_enable_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_ENABLE.
 *
 *     Enables debug session for target process. Call @op
 *     KFD_IOC_DBG_TRAP_DISABLE in kfd_ioctl_dbg_trap_args to disable the
 *     debug session.
 *
 *     @exception_mask (IN)     - exceptions to raise to the debugger
 *     @rinfo_ptr      (IN)     - pointer to runtime info buffer
 *                                (see kfd_runtime_info)
 *     @rinfo_size     (IN/OUT) - size of runtime info buffer in bytes
 *     @dbg_fd         (IN)     - fd on which KFD will notify the debugger
 *                                of raised exceptions set in exception_mask
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 *              Copies KFD saved kfd_runtime_info to @rinfo_ptr on enable.
 *              Size of kfd_runtime_info saved by KFD is returned in
 *              @rinfo_size.
 *            - EBADF if KFD cannot get a reference to dbg_fd.
 *            - EFAULT if KFD cannot copy runtime info to rinfo_ptr.
 *            - EINVAL if target process is already debug enabled.
 */
struct kfd_ioctl_dbg_trap_enable_args {
        __u64 exception_mask;
        __u64 rinfo_ptr;
        __u32 rinfo_size;
        __u32 dbg_fd;
};

/**
 * kfd_ioctl_dbg_trap_send_runtime_event_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT.
 *     Raises exceptions to runtime.
 *
 *     @exception_mask (IN) - exceptions to raise to runtime
 *     @gpu_id         (IN) - target device id
 *     @queue_id       (IN) - target queue id
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 *            - ENODEV if gpu_id not found.
 *              If exception_mask contains EC_PROCESS_RUNTIME, unblocks
 *              pending AMDKFD_IOC_RUNTIME_ENABLE call - see
 *              kfd_ioctl_runtime_enable_args.
 *              All other exceptions are raised to runtime through
 *              err_payload_addr. See kfd_context_save_area_header.
 */
struct kfd_ioctl_dbg_trap_send_runtime_event_args {
        __u64 exception_mask;
        __u32 gpu_id;
        __u32 queue_id;
};

/**
 * kfd_ioctl_dbg_trap_set_exceptions_enabled_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED
 *     Set new exceptions to be raised to the debugger.
 *
 *     @exception_mask (IN) - new exceptions to raise to the debugger
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 */
struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args {
        __u64 exception_mask;
};
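/*
 * Illustrative sketch (not part of the ABI): a debugger enabling a debug
 * session on a ptrace-attached target. target_pid, event_fd and kfd_fd are
 * assumptions of this sketch.
 *
 *      struct kfd_runtime_info rinfo = {0};
 *      struct kfd_ioctl_dbg_trap_args args = {0};
 *
 *      args.pid = target_pid;          // PTRACE_ATTACHed beforehand
 *      args.op = KFD_IOC_DBG_TRAP_ENABLE;
 *      args.enable.exception_mask = KFD_EC_MASK(EC_QUEUE_WAVE_TRAP);
 *      args.enable.rinfo_ptr = (__u64)(uintptr_t)&rinfo;
 *      args.enable.rinfo_size = sizeof(rinfo);
 *      args.enable.dbg_fd = event_fd;  // fd KFD notifies on exceptions
 *      ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args);
 */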
/**
 * kfd_ioctl_dbg_trap_set_wave_launch_override_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE
 *     Enable HW exceptions to raise trap.
 *
 *     @override_mode        (IN)     - see kfd_dbg_trap_override_mode
 *     @enable_mask          (IN/OUT) - reference kfd_dbg_trap_mask.
 *                                      IN is the override modes requested
 *                                      to be enabled.
 *                                      OUT is referenced in Return below.
 *     @support_request_mask (IN/OUT) - reference kfd_dbg_trap_mask.
 *                                      IN is the override modes requested
 *                                      for support check.
 *                                      OUT is referenced in Return below.
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 *              Previous enablement is returned in @enable_mask.
 *              Actual override support is returned in @support_request_mask.
 *            - EINVAL if override mode is not supported.
 *            - EACCES if trap support requested is not actually supported.
 *              i.e. enable_mask (IN) is not a subset of
 *              support_request_mask (OUT).
 *              Otherwise it is considered a generic error (see
 *              kfd_dbg_trap_operations).
 */
struct kfd_ioctl_dbg_trap_set_wave_launch_override_args {
        __u32 override_mode;
        __u32 enable_mask;
        __u32 support_request_mask;
        __u32 pad;
};

/**
 * kfd_ioctl_dbg_trap_set_wave_launch_mode_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE
 *     Set wave launch mode.
 *
 *     @launch_mode (IN) - see kfd_dbg_trap_wave_launch_mode
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 */
struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args {
        __u32 launch_mode;
        __u32 pad;
};

/**
 * kfd_ioctl_dbg_trap_suspend_queues_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_SUSPEND_QUEUES
 *     Suspend queues.
 *
 *     @exception_mask  (IN) - raised exceptions to clear
 *     @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue
 *                             id) to suspend
 *     @num_queues      (IN) - number of queues to suspend in
 *                             @queue_array_ptr
 *     @grace_period    (IN) - wave time allowance before preemption
 *                             per 1K GPU clock cycle unit
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Destruction of a suspended queue is blocked until the queue is
 *     resumed. This allows the debugger to access queue information and
 *     its context save area without running into a race condition on
 *     queue destruction.
 *     Automatically copies per queue context save area header information
 *     into the save area base
 *     (see kfd_queue_snapshot_entry and kfd_context_save_area_header).
 *
 *     Return - Number of queues suspended on SUCCESS.
 *              KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK,
 *              masked into each queue id in the @queue_array_ptr array,
 *              report the unsuccessful suspend reason.
 *              KFD_DBG_QUEUE_ERROR_MASK = HW failure.
 *              KFD_DBG_QUEUE_INVALID_MASK = queue does not exist, is new or
 *              is being destroyed.
 */
struct kfd_ioctl_dbg_trap_suspend_queues_args {
        __u64 exception_mask;
        __u64 queue_array_ptr;
        __u32 num_queues;
        __u32 grace_period;
};

/**
 * kfd_ioctl_dbg_trap_resume_queues_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_RESUME_QUEUES
 *     Resume queues.
 *
 *     @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue
 *                             id) to resume
 *     @num_queues      (IN) - number of queues to resume in
 *                             @queue_array_ptr
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - Number of queues resumed on SUCCESS.
 *              KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK,
 *              masked into each queue id in the @queue_array_ptr array,
 *              report the unsuccessful resume reason.
 *              KFD_DBG_QUEUE_ERROR_MASK = HW failure.
 *              KFD_DBG_QUEUE_INVALID_MASK = queue does not exist.
 */
struct kfd_ioctl_dbg_trap_resume_queues_args {
        __u64 queue_array_ptr;
        __u32 num_queues;
        __u32 pad;
};
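/*
 * Illustrative sketch (not part of the ABI): suspending queues and
 * checking the per-queue status masks written back into the array.
 * qid0/qid1, target_pid and kfd_fd are assumptions of this sketch.
 *
 *      __u32 queues[2] = { qid0, qid1 };
 *      struct kfd_ioctl_dbg_trap_args args = {0};
 *      int i, n;
 *
 *      args.pid = target_pid;
 *      args.op = KFD_IOC_DBG_TRAP_SUSPEND_QUEUES;
 *      args.suspend_queues.queue_array_ptr = (__u64)(uintptr_t)queues;
 *      args.suspend_queues.num_queues = 2;
 *      args.suspend_queues.grace_period = 0;
 *      n = ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args);  // #queues suspended
 *      for (i = 0; i < 2; i++)
 *              if (queues[i] & (KFD_DBG_QUEUE_ERROR_MASK |
 *                               KFD_DBG_QUEUE_INVALID_MASK))
 *                      ;       // queues[i] failed to suspend
 */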
/**
 * kfd_ioctl_dbg_trap_set_node_address_watch_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH
 *     Sets address watch for device.
 *
 *     @address (IN)  - watch address to set
 *     @mode    (IN)  - see kfd_dbg_trap_address_watch_mode
 *     @mask    (IN)  - watch address mask
 *     @gpu_id  (IN)  - target gpu to set watch point
 *     @id      (OUT) - watch id allocated
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 *              Allocated watch ID returned to @id.
 *            - ENODEV if gpu_id not found.
 *            - ENOMEM if watch IDs cannot be allocated.
 */
struct kfd_ioctl_dbg_trap_set_node_address_watch_args {
        __u64 address;
        __u32 mode;
        __u32 mask;
        __u32 gpu_id;
        __u32 id;
};

/**
 * kfd_ioctl_dbg_trap_clear_node_address_watch_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH
 *     Clear address watch for device.
 *
 *     @gpu_id (IN) - target device to clear watch point
 *     @id     (IN) - allocated watch id to clear
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 *            - ENODEV if gpu_id not found.
 *            - EINVAL if watch ID has not been allocated.
 */
struct kfd_ioctl_dbg_trap_clear_node_address_watch_args {
        __u32 gpu_id;
        __u32 id;
};

/**
 * kfd_ioctl_dbg_trap_set_flags_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_SET_FLAGS
 *     Sets flags for wave behaviour.
 *
 *     @flags (IN/OUT) - IN = flags to enable, OUT = flags previously enabled
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 *            - EACCES if any debug device does not allow flag options.
 */
struct kfd_ioctl_dbg_trap_set_flags_args {
        __u32 flags;
        __u32 pad;
};

/**
 * kfd_ioctl_dbg_trap_query_debug_event_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT
 *
 *     Find one or more raised exceptions. This function can return multiple
 *     exceptions from a single queue or a single device with one call. To
 *     find all raised exceptions, this function must be called repeatedly
 *     until it returns -EAGAIN. Returned exceptions can optionally be
 *     cleared by setting the corresponding bit in the @exception_mask input
 *     parameter. However, clearing an exception prevents retrieving further
 *     information about it with KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO.
 *
 *     @exception_mask (IN/OUT) - exceptions to clear (IN) and raised (OUT)
 *     @gpu_id         (OUT)    - gpu id of exceptions raised
 *     @queue_id       (OUT)    - queue id of exceptions raised
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on raised exception found
 *              Raised exceptions found are returned in @exception_mask
 *              with reported source id returned in @gpu_id or @queue_id.
 *            - EAGAIN if no raised exception has been found
 */
struct kfd_ioctl_dbg_trap_query_debug_event_args {
        __u64 exception_mask;
        __u32 gpu_id;
        __u32 queue_id;
};
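/*
 * Illustrative sketch (not part of the ABI): draining all raised
 * exceptions as described above, clearing each one as it is found.
 *
 *      struct kfd_ioctl_dbg_trap_args args;
 *
 *      for (;;) {
 *              memset(&args, 0, sizeof(args));
 *              args.pid = target_pid;
 *              args.op = KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT;
 *              args.query_debug_event.exception_mask = ~0ULL; // clear all
 *              if (ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args))
 *                      break;  // -EAGAIN: no more raised exceptions
 *              // args.query_debug_event.exception_mask now holds the
 *              // raised exceptions; gpu_id/queue_id identify the source.
 *      }
 */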
struct kfd_ioctl_dbg_trap_query_exception_info_args {
        __u64 info_ptr;
        __u32 info_size;
        __u32 source_id;
        __u32 exception_code;
        __u32 clear_exception;
};

/**
 * kfd_ioctl_dbg_trap_get_queue_snapshot_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT
 *     Get queue information.
 *
 *     @exception_mask   (IN)     - exceptions raised to clear
 *     @snapshot_buf_ptr (IN)     - queue snapshot entry buffer
 *                                  (see kfd_queue_snapshot_entry)
 *     @num_queues       (IN/OUT) - number of queue snapshot entries
 *         The debugger specifies the size of the array allocated in
 *         @num_queues.
 *         KFD returns the number of queues that actually existed. If this
 *         is larger than the size specified by the debugger, KFD will not
 *         overflow the array allocated by the debugger.
 *
 *     @entry_size       (IN/OUT) - size per entry in bytes
 *         The debugger specifies sizeof(struct kfd_queue_snapshot_entry)
 *         in @entry_size. KFD returns the number of bytes actually
 *         populated per entry. The debugger should use
 *         KFD_IOCTL_MINOR_VERSION to determine which fields in struct
 *         kfd_queue_snapshot_entry are valid. This allows growing the ABI
 *         in a backwards compatible manner.
 *         Note that entry_size(IN) should still be used to stride the
 *         snapshot buffer in the event that it is larger than the actual
 *         struct kfd_queue_snapshot_entry.
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 *              Copies @num_queues(IN) queue snapshot entries of size
 *              @entry_size(IN) into @snapshot_buf_ptr if
 *              @num_queues(IN) > 0.
 *              Otherwise return @num_queues(OUT) queue snapshot entries
 *              that exist.
 */
struct kfd_ioctl_dbg_trap_queue_snapshot_args {
        __u64 exception_mask;
        __u64 snapshot_buf_ptr;
        __u32 num_queues;
        __u32 entry_size;
};
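/*
 * Illustrative sketch (not part of the ABI): the two-call snapshot
 * pattern. The first call sizes the buffer, the second fills it.
 *
 *      struct kfd_ioctl_dbg_trap_args args = {0};
 *      struct kfd_queue_snapshot_entry *buf;
 *      __u32 n;
 *
 *      args.pid = target_pid;
 *      args.op = KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT;
 *      args.queue_snapshot.entry_size = sizeof(*buf);
 *      ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args);  // num_queues(IN) == 0
 *      n = args.queue_snapshot.num_queues;         // queues that exist
 *
 *      buf = calloc(n, sizeof(*buf));
 *      args.queue_snapshot.snapshot_buf_ptr = (__u64)(uintptr_t)buf;
 *      args.queue_snapshot.num_queues = n;
 *      ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args);
 *      // entries in buf are strided by entry_size(IN)
 */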
/**
 * kfd_ioctl_dbg_trap_get_device_snapshot_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT
 *     Get device information.
 *
 *     @exception_mask   (IN)     - exceptions raised to clear
 *     @snapshot_buf_ptr (IN)     - pointer to snapshot buffer
 *                                  (see kfd_dbg_device_info_entry)
 *     @num_devices      (IN/OUT) - number of debug devices to snapshot
 *         The debugger specifies the size of the array allocated in
 *         @num_devices.
 *         KFD returns the number of devices that actually existed. If this
 *         is larger than the size specified by the debugger, KFD will not
 *         overflow the array allocated by the debugger.
 *
 *     @entry_size       (IN/OUT) - size per entry in bytes
 *         The debugger specifies sizeof(struct kfd_dbg_device_info_entry)
 *         in @entry_size. KFD returns the number of bytes actually
 *         populated. The debugger should use KFD_IOCTL_MINOR_VERSION to
 *         determine which fields in struct kfd_dbg_device_info_entry are
 *         valid. This allows growing the ABI in a backwards compatible
 *         manner.
 *         Note that entry_size(IN) should still be used to stride the
 *         snapshot buffer in the event that it is larger than the actual
 *         struct kfd_dbg_device_info_entry.
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 *              Copies @num_devices(IN) device snapshot entries of size
 *              @entry_size(IN) into @snapshot_buf_ptr if
 *              @num_devices(IN) > 0.
 *              Otherwise return @num_devices(OUT) device snapshot entries
 *              that exist.
 */
struct kfd_ioctl_dbg_trap_device_snapshot_args {
        __u64 exception_mask;
        __u64 snapshot_buf_ptr;
        __u32 num_devices;
        __u32 entry_size;
};

/**
 * kfd_ioctl_dbg_trap_args
 *
 * Arguments to debug target process.
 *
 *     @pid - target process to debug
 *     @op  - debug operation (see kfd_dbg_trap_operations)
 *
 *     @op determines which union struct args to use.
 *     Refer to kern docs for each kfd_ioctl_dbg_trap_*_args struct.
 */
struct kfd_ioctl_dbg_trap_args {
        __u32 pid;
        __u32 op;

        union {
                struct kfd_ioctl_dbg_trap_enable_args enable;
                struct kfd_ioctl_dbg_trap_send_runtime_event_args send_runtime_event;
                struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args set_exceptions_enabled;
                struct kfd_ioctl_dbg_trap_set_wave_launch_override_args launch_override;
                struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args launch_mode;
                struct kfd_ioctl_dbg_trap_suspend_queues_args suspend_queues;
                struct kfd_ioctl_dbg_trap_resume_queues_args resume_queues;
                struct kfd_ioctl_dbg_trap_set_node_address_watch_args set_node_address_watch;
                struct kfd_ioctl_dbg_trap_clear_node_address_watch_args clear_node_address_watch;
                struct kfd_ioctl_dbg_trap_set_flags_args set_flags;
                struct kfd_ioctl_dbg_trap_query_debug_event_args query_debug_event;
                struct kfd_ioctl_dbg_trap_query_exception_info_args query_exception_info;
                struct kfd_ioctl_dbg_trap_queue_snapshot_args queue_snapshot;
                struct kfd_ioctl_dbg_trap_device_snapshot_args device_snapshot;
        };
};
#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr)                   _IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr, type)            _IOR(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOW(nr, type)            _IOW(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOWR(nr, type)           _IOWR(AMDKFD_IOCTL_BASE, nr, type)

#define AMDKFD_IOC_GET_VERSION                  \
                AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)

#define AMDKFD_IOC_CREATE_QUEUE                 \
                AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)

#define AMDKFD_IOC_DESTROY_QUEUE                \
                AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)

#define AMDKFD_IOC_SET_MEMORY_POLICY            \
                AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)

#define AMDKFD_IOC_GET_CLOCK_COUNTERS           \
                AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)

#define AMDKFD_IOC_GET_PROCESS_APERTURES        \
                AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)

#define AMDKFD_IOC_UPDATE_QUEUE                 \
                AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)

#define AMDKFD_IOC_CREATE_EVENT                 \
                AMDKFD_IOWR(0x08, struct kfd_ioctl_create_event_args)

#define AMDKFD_IOC_DESTROY_EVENT                \
                AMDKFD_IOW(0x09, struct kfd_ioctl_destroy_event_args)

#define AMDKFD_IOC_SET_EVENT                    \
                AMDKFD_IOW(0x0A, struct kfd_ioctl_set_event_args)

#define AMDKFD_IOC_RESET_EVENT                  \
                AMDKFD_IOW(0x0B, struct kfd_ioctl_reset_event_args)

#define AMDKFD_IOC_WAIT_EVENTS                  \
                AMDKFD_IOWR(0x0C, struct kfd_ioctl_wait_events_args)

#define AMDKFD_IOC_DBG_REGISTER_DEPRECATED      \
                AMDKFD_IOW(0x0D, struct kfd_ioctl_dbg_register_args)

#define AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED    \
                AMDKFD_IOW(0x0E, struct kfd_ioctl_dbg_unregister_args)

#define AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED \
                AMDKFD_IOW(0x0F, struct kfd_ioctl_dbg_address_watch_args)

#define AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED  \
                AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)

#define AMDKFD_IOC_SET_SCRATCH_BACKING_VA       \
                AMDKFD_IOWR(0x11, struct kfd_ioctl_set_scratch_backing_va_args)

#define AMDKFD_IOC_GET_TILE_CONFIG              \
                AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args)

#define AMDKFD_IOC_SET_TRAP_HANDLER             \
                AMDKFD_IOW(0x13, struct kfd_ioctl_set_trap_handler_args)

#define AMDKFD_IOC_GET_PROCESS_APERTURES_NEW    \
                AMDKFD_IOWR(0x14,               \
                        struct kfd_ioctl_get_process_apertures_new_args)

#define AMDKFD_IOC_ACQUIRE_VM                   \
                AMDKFD_IOW(0x15, struct kfd_ioctl_acquire_vm_args)

#define AMDKFD_IOC_ALLOC_MEMORY_OF_GPU          \
                AMDKFD_IOWR(0x16, struct kfd_ioctl_alloc_memory_of_gpu_args)

#define AMDKFD_IOC_FREE_MEMORY_OF_GPU           \
                AMDKFD_IOW(0x17, struct kfd_ioctl_free_memory_of_gpu_args)

#define AMDKFD_IOC_MAP_MEMORY_TO_GPU            \
                AMDKFD_IOWR(0x18, struct kfd_ioctl_map_memory_to_gpu_args)

#define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU        \
                AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)

#define AMDKFD_IOC_SET_CU_MASK                  \
                AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)

#define AMDKFD_IOC_GET_QUEUE_WAVE_STATE         \
                AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)

#define AMDKFD_IOC_GET_DMABUF_INFO              \
                AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args)

#define AMDKFD_IOC_IMPORT_DMABUF                \
                AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)

#define AMDKFD_IOC_ALLOC_QUEUE_GWS             \
                AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args)

#define AMDKFD_IOC_SMI_EVENTS                   \
                AMDKFD_IOWR(0x1F, struct kfd_ioctl_smi_events_args)

#define AMDKFD_IOC_SVM  AMDKFD_IOWR(0x20, struct kfd_ioctl_svm_args)

#define AMDKFD_IOC_SET_XNACK_MODE               \
                AMDKFD_IOWR(0x21, struct kfd_ioctl_set_xnack_mode_args)

#define AMDKFD_IOC_CRIU_OP                      \
                AMDKFD_IOWR(0x22, struct kfd_ioctl_criu_args)

#define AMDKFD_IOC_AVAILABLE_MEMORY             \
                AMDKFD_IOWR(0x23, struct kfd_ioctl_get_available_memory_args)

#define AMDKFD_IOC_EXPORT_DMABUF                \
                AMDKFD_IOWR(0x24, struct kfd_ioctl_export_dmabuf_args)

#define AMDKFD_IOC_RUNTIME_ENABLE               \
                AMDKFD_IOWR(0x25, struct kfd_ioctl_runtime_enable_args)

#define AMDKFD_IOC_DBG_TRAP                     \
                AMDKFD_IOWR(0x26, struct kfd_ioctl_dbg_trap_args)

#define AMDKFD_COMMAND_START            0x01
#define AMDKFD_COMMAND_END              0x27

#endif