1 /* 2 * Copyright 2014 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 */ 23 24 /** 25 * \file amdgpu.h 26 * 27 * Declare public libdrm_amdgpu API 28 * 29 * This file define API exposed by libdrm_amdgpu library. 30 * User wanted to use libdrm_amdgpu functionality must include 31 * this file. 32 * 33 */ 34 #ifndef _AMDGPU_H_ 35 #define _AMDGPU_H_ 36 37 #include <stdint.h> 38 #include <stdbool.h> 39 40 #ifdef __cplusplus 41 extern "C" { 42 #endif 43 44 struct drm_amdgpu_info_hw_ip; 45 struct drm_amdgpu_bo_list_entry; 46 47 /*--------------------------------------------------------------------------*/ 48 /* --------------------------- Defines ------------------------------------ */ 49 /*--------------------------------------------------------------------------*/ 50 51 /** 52 * Define max. number of Command Buffers (IB) which could be sent to the single 53 * hardware IP to accommodate CE/DE requirements 54 * 55 * \sa amdgpu_cs_ib_info 56 */ 57 #define AMDGPU_CS_MAX_IBS_PER_SUBMIT 4 58 59 /** 60 * Special timeout value meaning that the timeout is infinite. 61 */ 62 #define AMDGPU_TIMEOUT_INFINITE 0xffffffffffffffffull 63 64 /** 65 * Used in amdgpu_cs_query_fence_status(), meaning that the given timeout 66 * is absolute. 67 */ 68 #define AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE (1 << 0) 69 70 /*--------------------------------------------------------------------------*/ 71 /* ----------------------------- Enums ------------------------------------ */ 72 /*--------------------------------------------------------------------------*/ 73 74 /** 75 * Enum describing possible handle types 76 * 77 * \sa amdgpu_bo_import, amdgpu_bo_export 78 * 79 */ 80 enum amdgpu_bo_handle_type { 81 /** GEM flink name (needs DRM authentication, used by DRI2) */ 82 amdgpu_bo_handle_type_gem_flink_name = 0, 83 84 /** KMS handle which is used by all driver ioctls */ 85 amdgpu_bo_handle_type_kms = 1, 86 87 /** DMA-buf fd handle */ 88 amdgpu_bo_handle_type_dma_buf_fd = 2, 89 90 /** Deprecated in favour of and same behaviour as 91 * amdgpu_bo_handle_type_kms, use that instead of this 92 */ 93 amdgpu_bo_handle_type_kms_noimport = 3, 94 }; 95 96 /** Define known types of GPU VM VA ranges */ 97 enum amdgpu_gpu_va_range 98 { 99 /** Allocate from "normal"/general range */ 100 amdgpu_gpu_va_range_general = 0 101 }; 102 103 enum amdgpu_sw_info { 104 amdgpu_sw_info_address32_hi = 0, 105 }; 106 107 /*--------------------------------------------------------------------------*/ 108 /* -------------------------- Datatypes ----------------------------------- */ 109 /*--------------------------------------------------------------------------*/ 110 111 /** 112 * Define opaque pointer to context associated with fd. 113 * This context will be returned as the result of 114 * "initialize" function and should be pass as the first 115 * parameter to any API call 116 */ 117 typedef struct amdgpu_device *amdgpu_device_handle; 118 119 /** 120 * Define GPU Context type as pointer to opaque structure 121 * Example of GPU Context is the "rendering" context associated 122 * with OpenGL context (glCreateContext) 123 */ 124 typedef struct amdgpu_context *amdgpu_context_handle; 125 126 /** 127 * Define handle for amdgpu resources: buffer, GDS, etc. 128 */ 129 typedef struct amdgpu_bo *amdgpu_bo_handle; 130 131 /** 132 * Define handle for list of BOs 133 */ 134 typedef struct amdgpu_bo_list *amdgpu_bo_list_handle; 135 136 /** 137 * Define handle to be used to work with VA allocated ranges 138 */ 139 typedef struct amdgpu_va *amdgpu_va_handle; 140 141 /** 142 * Define handle for semaphore 143 */ 144 typedef struct amdgpu_semaphore *amdgpu_semaphore_handle; 145 146 /*--------------------------------------------------------------------------*/ 147 /* -------------------------- Structures ---------------------------------- */ 148 /*--------------------------------------------------------------------------*/ 149 150 /** 151 * Structure describing memory allocation request 152 * 153 * \sa amdgpu_bo_alloc() 154 * 155 */ 156 struct amdgpu_bo_alloc_request { 157 /** Allocation request. It must be aligned correctly. */ 158 uint64_t alloc_size; 159 160 /** 161 * It may be required to have some specific alignment requirements 162 * for physical back-up storage (e.g. for displayable surface). 163 * If 0 there is no special alignment requirement 164 */ 165 uint64_t phys_alignment; 166 167 /** 168 * UMD should specify where to allocate memory and how it 169 * will be accessed by the CPU. 170 */ 171 uint32_t preferred_heap; 172 173 /** Additional flags passed on allocation */ 174 uint64_t flags; 175 }; 176 177 /** 178 * Special UMD specific information associated with buffer. 179 * 180 * It may be need to pass some buffer charactersitic as part 181 * of buffer sharing. Such information are defined UMD and 182 * opaque for libdrm_amdgpu as well for kernel driver. 183 * 184 * \sa amdgpu_bo_set_metadata(), amdgpu_bo_query_info, 185 * amdgpu_bo_import(), amdgpu_bo_export 186 * 187 */ 188 struct amdgpu_bo_metadata { 189 /** Special flag associated with surface */ 190 uint64_t flags; 191 192 /** 193 * ASIC-specific tiling information (also used by DCE). 194 * The encoding is defined by the AMDGPU_TILING_* definitions. 195 */ 196 uint64_t tiling_info; 197 198 /** Size of metadata associated with the buffer, in bytes. */ 199 uint32_t size_metadata; 200 201 /** UMD specific metadata. Opaque for kernel */ 202 uint32_t umd_metadata[64]; 203 }; 204 205 /** 206 * Structure describing allocated buffer. Client may need 207 * to query such information as part of 'sharing' buffers mechanism 208 * 209 * \sa amdgpu_bo_set_metadata(), amdgpu_bo_query_info(), 210 * amdgpu_bo_import(), amdgpu_bo_export() 211 */ 212 struct amdgpu_bo_info { 213 /** Allocated memory size */ 214 uint64_t alloc_size; 215 216 /** 217 * It may be required to have some specific alignment requirements 218 * for physical back-up storage. 219 */ 220 uint64_t phys_alignment; 221 222 /** Heap where to allocate memory. */ 223 uint32_t preferred_heap; 224 225 /** Additional allocation flags. */ 226 uint64_t alloc_flags; 227 228 /** Metadata associated with buffer if any. */ 229 struct amdgpu_bo_metadata metadata; 230 }; 231 232 /** 233 * Structure with information about "imported" buffer 234 * 235 * \sa amdgpu_bo_import() 236 * 237 */ 238 struct amdgpu_bo_import_result { 239 /** Handle of memory/buffer to use */ 240 amdgpu_bo_handle buf_handle; 241 242 /** Buffer size */ 243 uint64_t alloc_size; 244 }; 245 246 /** 247 * 248 * Structure to describe GDS partitioning information. 249 * \note OA and GWS resources are asscoiated with GDS partition 250 * 251 * \sa amdgpu_gpu_resource_query_gds_info 252 * 253 */ 254 struct amdgpu_gds_resource_info { 255 uint32_t gds_gfx_partition_size; 256 uint32_t compute_partition_size; 257 uint32_t gds_total_size; 258 uint32_t gws_per_gfx_partition; 259 uint32_t gws_per_compute_partition; 260 uint32_t oa_per_gfx_partition; 261 uint32_t oa_per_compute_partition; 262 }; 263 264 /** 265 * Structure describing CS fence 266 * 267 * \sa amdgpu_cs_query_fence_status(), amdgpu_cs_request, amdgpu_cs_submit() 268 * 269 */ 270 struct amdgpu_cs_fence { 271 272 /** In which context IB was sent to execution */ 273 amdgpu_context_handle context; 274 275 /** To which HW IP type the fence belongs */ 276 uint32_t ip_type; 277 278 /** IP instance index if there are several IPs of the same type. */ 279 uint32_t ip_instance; 280 281 /** Ring index of the HW IP */ 282 uint32_t ring; 283 284 /** Specify fence for which we need to check submission status.*/ 285 uint64_t fence; 286 }; 287 288 /** 289 * Structure describing IB 290 * 291 * \sa amdgpu_cs_request, amdgpu_cs_submit() 292 * 293 */ 294 struct amdgpu_cs_ib_info { 295 /** Special flags */ 296 uint64_t flags; 297 298 /** Virtual MC address of the command buffer */ 299 uint64_t ib_mc_address; 300 301 /** 302 * Size of Command Buffer to be submitted. 303 * - The size is in units of dwords (4 bytes). 304 * - Could be 0 305 */ 306 uint32_t size; 307 }; 308 309 /** 310 * Structure describing fence information 311 * 312 * \sa amdgpu_cs_request, amdgpu_cs_query_fence, 313 * amdgpu_cs_submit(), amdgpu_cs_query_fence_status() 314 */ 315 struct amdgpu_cs_fence_info { 316 /** buffer object for the fence */ 317 amdgpu_bo_handle handle; 318 319 /** fence offset in the unit of sizeof(uint64_t) */ 320 uint64_t offset; 321 }; 322 323 /** 324 * Structure describing submission request 325 * 326 * \note We could have several IBs as packet. e.g. CE, CE, DE case for gfx 327 * 328 * \sa amdgpu_cs_submit() 329 */ 330 struct amdgpu_cs_request { 331 /** Specify flags with additional information */ 332 uint64_t flags; 333 334 /** Specify HW IP block type to which to send the IB. */ 335 unsigned ip_type; 336 337 /** IP instance index if there are several IPs of the same type. */ 338 unsigned ip_instance; 339 340 /** 341 * Specify ring index of the IP. We could have several rings 342 * in the same IP. E.g. 0 for SDMA0 and 1 for SDMA1. 343 */ 344 uint32_t ring; 345 346 /** 347 * List handle with resources used by this request. 348 */ 349 amdgpu_bo_list_handle resources; 350 351 /** 352 * Number of dependencies this Command submission needs to 353 * wait for before starting execution. 354 */ 355 uint32_t number_of_dependencies; 356 357 /** 358 * Array of dependencies which need to be met before 359 * execution can start. 360 */ 361 struct amdgpu_cs_fence *dependencies; 362 363 /** Number of IBs to submit in the field ibs. */ 364 uint32_t number_of_ibs; 365 366 /** 367 * IBs to submit. Those IBs will be submit together as single entity 368 */ 369 struct amdgpu_cs_ib_info *ibs; 370 371 /** 372 * The returned sequence number for the command submission 373 */ 374 uint64_t seq_no; 375 376 /** 377 * The fence information 378 */ 379 struct amdgpu_cs_fence_info fence_info; 380 }; 381 382 /** 383 * Structure which provide information about GPU VM MC Address space 384 * alignments requirements 385 * 386 * \sa amdgpu_query_buffer_size_alignment 387 */ 388 struct amdgpu_buffer_size_alignments { 389 /** Size alignment requirement for allocation in 390 * local memory */ 391 uint64_t size_local; 392 393 /** 394 * Size alignment requirement for allocation in remote memory 395 */ 396 uint64_t size_remote; 397 }; 398 399 /** 400 * Structure which provide information about heap 401 * 402 * \sa amdgpu_query_heap_info() 403 * 404 */ 405 struct amdgpu_heap_info { 406 /** Theoretical max. available memory in the given heap */ 407 uint64_t heap_size; 408 409 /** 410 * Number of bytes allocated in the heap. This includes all processes 411 * and private allocations in the kernel. It changes when new buffers 412 * are allocated, freed, and moved. It cannot be larger than 413 * heap_size. 414 */ 415 uint64_t heap_usage; 416 417 /** 418 * Theoretical possible max. size of buffer which 419 * could be allocated in the given heap 420 */ 421 uint64_t max_allocation; 422 }; 423 424 /** 425 * Describe GPU h/w info needed for UMD correct initialization 426 * 427 * \sa amdgpu_query_gpu_info() 428 */ 429 struct amdgpu_gpu_info { 430 /** Asic id */ 431 uint32_t asic_id; 432 /** Chip revision */ 433 uint32_t chip_rev; 434 /** Chip external revision */ 435 uint32_t chip_external_rev; 436 /** Family ID */ 437 uint32_t family_id; 438 /** Special flags */ 439 uint64_t ids_flags; 440 /** max engine clock*/ 441 uint64_t max_engine_clk; 442 /** max memory clock */ 443 uint64_t max_memory_clk; 444 /** number of shader engines */ 445 uint32_t num_shader_engines; 446 /** number of shader arrays per engine */ 447 uint32_t num_shader_arrays_per_engine; 448 /** Number of available good shader pipes */ 449 uint32_t avail_quad_shader_pipes; 450 /** Max. number of shader pipes.(including good and bad pipes */ 451 uint32_t max_quad_shader_pipes; 452 /** Number of parameter cache entries per shader quad pipe */ 453 uint32_t cache_entries_per_quad_pipe; 454 /** Number of available graphics context */ 455 uint32_t num_hw_gfx_contexts; 456 /** Number of render backend pipes */ 457 uint32_t rb_pipes; 458 /** Enabled render backend pipe mask */ 459 uint32_t enabled_rb_pipes_mask; 460 /** Frequency of GPU Counter */ 461 uint32_t gpu_counter_freq; 462 /** CC_RB_BACKEND_DISABLE.BACKEND_DISABLE per SE */ 463 uint32_t backend_disable[4]; 464 /** Value of MC_ARB_RAMCFG register*/ 465 uint32_t mc_arb_ramcfg; 466 /** Value of GB_ADDR_CONFIG */ 467 uint32_t gb_addr_cfg; 468 /** Values of the GB_TILE_MODE0..31 registers */ 469 uint32_t gb_tile_mode[32]; 470 /** Values of GB_MACROTILE_MODE0..15 registers */ 471 uint32_t gb_macro_tile_mode[16]; 472 /** Value of PA_SC_RASTER_CONFIG register per SE */ 473 uint32_t pa_sc_raster_cfg[4]; 474 /** Value of PA_SC_RASTER_CONFIG_1 register per SE */ 475 uint32_t pa_sc_raster_cfg1[4]; 476 /* CU info */ 477 uint32_t cu_active_number; 478 uint32_t cu_ao_mask; 479 uint32_t cu_bitmap[4][4]; 480 /* video memory type info*/ 481 uint32_t vram_type; 482 /* video memory bit width*/ 483 uint32_t vram_bit_width; 484 /** constant engine ram size*/ 485 uint32_t ce_ram_size; 486 /* vce harvesting instance */ 487 uint32_t vce_harvest_config; 488 /* PCI revision ID */ 489 uint32_t pci_rev_id; 490 }; 491 492 493 /*--------------------------------------------------------------------------*/ 494 /*------------------------- Functions --------------------------------------*/ 495 /*--------------------------------------------------------------------------*/ 496 497 /* 498 * Initialization / Cleanup 499 * 500 */ 501 502 /** 503 * 504 * \param fd - \c [in] File descriptor for AMD GPU device 505 * received previously as the result of 506 * e.g. drmOpen() call. 507 * For legacy fd type, the DRI2/DRI3 508 * authentication should be done before 509 * calling this function. 510 * \param major_version - \c [out] Major version of library. It is assumed 511 * that adding new functionality will cause 512 * increase in major version 513 * \param minor_version - \c [out] Minor version of library 514 * \param device_handle - \c [out] Pointer to opaque context which should 515 * be passed as the first parameter on each 516 * API call 517 * 518 * 519 * \return 0 on success\n 520 * <0 - Negative POSIX Error code 521 * 522 * 523 * \sa amdgpu_device_deinitialize() 524 */ 525 int amdgpu_device_initialize(int fd, 526 uint32_t *major_version, 527 uint32_t *minor_version, 528 amdgpu_device_handle *device_handle); 529 530 /** 531 * 532 * When access to such library does not needed any more the special 533 * function must be call giving opportunity to clean up any 534 * resources if needed. 535 * 536 * \param device_handle - \c [in] Context associated with file 537 * descriptor for AMD GPU device 538 * received previously as the 539 * result e.g. of drmOpen() call. 540 * 541 * \return 0 on success\n 542 * <0 - Negative POSIX Error code 543 * 544 * \sa amdgpu_device_initialize() 545 * 546 */ 547 int amdgpu_device_deinitialize(amdgpu_device_handle device_handle); 548 549 /* 550 * Memory Management 551 * 552 */ 553 554 /** 555 * Allocate memory to be used by UMD for GPU related operations 556 * 557 * \param dev - \c [in] Device handle. 558 * See #amdgpu_device_initialize() 559 * \param alloc_buffer - \c [in] Pointer to the structure describing an 560 * allocation request 561 * \param buf_handle - \c [out] Allocated buffer handle 562 * 563 * \return 0 on success\n 564 * <0 - Negative POSIX Error code 565 * 566 * \sa amdgpu_bo_free() 567 */ 568 int amdgpu_bo_alloc(amdgpu_device_handle dev, 569 struct amdgpu_bo_alloc_request *alloc_buffer, 570 amdgpu_bo_handle *buf_handle); 571 572 /** 573 * Associate opaque data with buffer to be queried by another UMD 574 * 575 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() 576 * \param buf_handle - \c [in] Buffer handle 577 * \param info - \c [in] Metadata to associated with buffer 578 * 579 * \return 0 on success\n 580 * <0 - Negative POSIX Error code 581 */ 582 int amdgpu_bo_set_metadata(amdgpu_bo_handle buf_handle, 583 struct amdgpu_bo_metadata *info); 584 585 /** 586 * Query buffer information including metadata previusly associated with 587 * buffer. 588 * 589 * \param dev - \c [in] Device handle. 590 * See #amdgpu_device_initialize() 591 * \param buf_handle - \c [in] Buffer handle 592 * \param info - \c [out] Structure describing buffer 593 * 594 * \return 0 on success\n 595 * <0 - Negative POSIX Error code 596 * 597 * \sa amdgpu_bo_set_metadata(), amdgpu_bo_alloc() 598 */ 599 int amdgpu_bo_query_info(amdgpu_bo_handle buf_handle, 600 struct amdgpu_bo_info *info); 601 602 /** 603 * Allow others to get access to buffer 604 * 605 * \param dev - \c [in] Device handle. 606 * See #amdgpu_device_initialize() 607 * \param buf_handle - \c [in] Buffer handle 608 * \param type - \c [in] Type of handle requested 609 * \param shared_handle - \c [out] Special "shared" handle 610 * 611 * \return 0 on success\n 612 * <0 - Negative POSIX Error code 613 * 614 * \sa amdgpu_bo_import() 615 * 616 */ 617 int amdgpu_bo_export(amdgpu_bo_handle buf_handle, 618 enum amdgpu_bo_handle_type type, 619 uint32_t *shared_handle); 620 621 /** 622 * Request access to "shared" buffer 623 * 624 * \param dev - \c [in] Device handle. 625 * See #amdgpu_device_initialize() 626 * \param type - \c [in] Type of handle requested 627 * \param shared_handle - \c [in] Shared handle received as result "import" 628 * operation 629 * \param output - \c [out] Pointer to structure with information 630 * about imported buffer 631 * 632 * \return 0 on success\n 633 * <0 - Negative POSIX Error code 634 * 635 * \note Buffer must be "imported" only using new "fd" (different from 636 * one used by "exporter"). 637 * 638 * \sa amdgpu_bo_export() 639 * 640 */ 641 int amdgpu_bo_import(amdgpu_device_handle dev, 642 enum amdgpu_bo_handle_type type, 643 uint32_t shared_handle, 644 struct amdgpu_bo_import_result *output); 645 646 /** 647 * Request GPU access to user allocated memory e.g. via "malloc" 648 * 649 * \param dev - [in] Device handle. See #amdgpu_device_initialize() 650 * \param cpu - [in] CPU address of user allocated memory which we 651 * want to map to GPU address space (make GPU accessible) 652 * (This address must be correctly aligned). 653 * \param size - [in] Size of allocation (must be correctly aligned) 654 * \param buf_handle - [out] Buffer handle for the userptr memory 655 * resource on submission and be used in other operations. 656 * 657 * 658 * \return 0 on success\n 659 * <0 - Negative POSIX Error code 660 * 661 * \note 662 * This call doesn't guarantee that such memory will be persistently 663 * "locked" / make non-pageable. The purpose of this call is to provide 664 * opportunity for GPU get access to this resource during submission. 665 * 666 * The maximum amount of memory which could be mapped in this call depends 667 * if overcommit is disabled or not. If overcommit is disabled than the max. 668 * amount of memory to be pinned will be limited by left "free" size in total 669 * amount of memory which could be locked simultaneously ("GART" size). 670 * 671 * Supported (theoretical) max. size of mapping is restricted only by 672 * "GART" size. 673 * 674 * It is responsibility of caller to correctly specify access rights 675 * on VA assignment. 676 */ 677 int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev, 678 void *cpu, uint64_t size, 679 amdgpu_bo_handle *buf_handle); 680 681 /** 682 * Validate if the user memory comes from BO 683 * 684 * \param dev - [in] Device handle. See #amdgpu_device_initialize() 685 * \param cpu - [in] CPU address of user allocated memory which we 686 * want to map to GPU address space (make GPU accessible) 687 * (This address must be correctly aligned). 688 * \param size - [in] Size of allocation (must be correctly aligned) 689 * \param buf_handle - [out] Buffer handle for the userptr memory 690 * if the user memory is not from BO, the buf_handle will be NULL. 691 * \param offset_in_bo - [out] offset in this BO for this user memory 692 * 693 * 694 * \return 0 on success\n 695 * <0 - Negative POSIX Error code 696 * 697 */ 698 int amdgpu_find_bo_by_cpu_mapping(amdgpu_device_handle dev, 699 void *cpu, 700 uint64_t size, 701 amdgpu_bo_handle *buf_handle, 702 uint64_t *offset_in_bo); 703 704 /** 705 * Free previously allocated memory 706 * 707 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() 708 * \param buf_handle - \c [in] Buffer handle to free 709 * 710 * \return 0 on success\n 711 * <0 - Negative POSIX Error code 712 * 713 * \note In the case of memory shared between different applications all 714 * resources will be “physically” freed only all such applications 715 * will be terminated 716 * \note If is UMD responsibility to ‘free’ buffer only when there is no 717 * more GPU access 718 * 719 * \sa amdgpu_bo_set_metadata(), amdgpu_bo_alloc() 720 * 721 */ 722 int amdgpu_bo_free(amdgpu_bo_handle buf_handle); 723 724 /** 725 * Increase the reference count of a buffer object 726 * 727 * \param bo - \c [in] Buffer object handle to increase the reference count 728 * 729 * \sa amdgpu_bo_alloc(), amdgpu_bo_free() 730 * 731 */ 732 void amdgpu_bo_inc_ref(amdgpu_bo_handle bo); 733 734 /** 735 * Request CPU access to GPU accessible memory 736 * 737 * \param buf_handle - \c [in] Buffer handle 738 * \param cpu - \c [out] CPU address to be used for access 739 * 740 * \return 0 on success\n 741 * <0 - Negative POSIX Error code 742 * 743 * \sa amdgpu_bo_cpu_unmap() 744 * 745 */ 746 int amdgpu_bo_cpu_map(amdgpu_bo_handle buf_handle, void **cpu); 747 748 /** 749 * Release CPU access to GPU memory 750 * 751 * \param buf_handle - \c [in] Buffer handle 752 * 753 * \return 0 on success\n 754 * <0 - Negative POSIX Error code 755 * 756 * \sa amdgpu_bo_cpu_map() 757 * 758 */ 759 int amdgpu_bo_cpu_unmap(amdgpu_bo_handle buf_handle); 760 761 /** 762 * Wait until a buffer is not used by the device. 763 * 764 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() 765 * \param buf_handle - \c [in] Buffer handle. 766 * \param timeout_ns - Timeout in nanoseconds. 767 * \param buffer_busy - 0 if buffer is idle, all GPU access was completed 768 * and no GPU access is scheduled. 769 * 1 GPU access is in fly or scheduled 770 * 771 * \return 0 - on success 772 * <0 - Negative POSIX Error code 773 */ 774 int amdgpu_bo_wait_for_idle(amdgpu_bo_handle buf_handle, 775 uint64_t timeout_ns, 776 bool *buffer_busy); 777 778 /** 779 * Creates a BO list handle for command submission. 780 * 781 * \param dev - \c [in] Device handle. 782 * See #amdgpu_device_initialize() 783 * \param number_of_buffers - \c [in] Number of BOs in the list 784 * \param buffers - \c [in] List of BO handles 785 * \param result - \c [out] Created BO list handle 786 * 787 * \return 0 on success\n 788 * <0 - Negative POSIX Error code 789 * 790 * \sa amdgpu_bo_list_destroy_raw(), amdgpu_cs_submit_raw2() 791 */ 792 int amdgpu_bo_list_create_raw(amdgpu_device_handle dev, 793 uint32_t number_of_buffers, 794 struct drm_amdgpu_bo_list_entry *buffers, 795 uint32_t *result); 796 797 /** 798 * Destroys a BO list handle. 799 * 800 * \param bo_list - \c [in] BO list handle. 801 * 802 * \return 0 on success\n 803 * <0 - Negative POSIX Error code 804 * 805 * \sa amdgpu_bo_list_create_raw(), amdgpu_cs_submit_raw2() 806 */ 807 int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev, uint32_t bo_list); 808 809 /** 810 * Creates a BO list handle for command submission. 811 * 812 * \param dev - \c [in] Device handle. 813 * See #amdgpu_device_initialize() 814 * \param number_of_resources - \c [in] Number of BOs in the list 815 * \param resources - \c [in] List of BO handles 816 * \param resource_prios - \c [in] Optional priority for each handle 817 * \param result - \c [out] Created BO list handle 818 * 819 * \return 0 on success\n 820 * <0 - Negative POSIX Error code 821 * 822 * \sa amdgpu_bo_list_destroy() 823 */ 824 int amdgpu_bo_list_create(amdgpu_device_handle dev, 825 uint32_t number_of_resources, 826 amdgpu_bo_handle *resources, 827 uint8_t *resource_prios, 828 amdgpu_bo_list_handle *result); 829 830 /** 831 * Destroys a BO list handle. 832 * 833 * \param handle - \c [in] BO list handle. 834 * 835 * \return 0 on success\n 836 * <0 - Negative POSIX Error code 837 * 838 * \sa amdgpu_bo_list_create() 839 */ 840 int amdgpu_bo_list_destroy(amdgpu_bo_list_handle handle); 841 842 /** 843 * Update resources for existing BO list 844 * 845 * \param handle - \c [in] BO list handle 846 * \param number_of_resources - \c [in] Number of BOs in the list 847 * \param resources - \c [in] List of BO handles 848 * \param resource_prios - \c [in] Optional priority for each handle 849 * 850 * \return 0 on success\n 851 * <0 - Negative POSIX Error code 852 * 853 * \sa amdgpu_bo_list_update() 854 */ 855 int amdgpu_bo_list_update(amdgpu_bo_list_handle handle, 856 uint32_t number_of_resources, 857 amdgpu_bo_handle *resources, 858 uint8_t *resource_prios); 859 860 /* 861 * GPU Execution context 862 * 863 */ 864 865 /** 866 * Create GPU execution Context 867 * 868 * For the purpose of GPU Scheduler and GPU Robustness extensions it is 869 * necessary to have information/identify rendering/compute contexts. 870 * It also may be needed to associate some specific requirements with such 871 * contexts. Kernel driver will guarantee that submission from the same 872 * context will always be executed in order (first come, first serve). 873 * 874 * 875 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() 876 * \param priority - \c [in] Context creation flags. See AMDGPU_CTX_PRIORITY_* 877 * \param context - \c [out] GPU Context handle 878 * 879 * \return 0 on success\n 880 * <0 - Negative POSIX Error code 881 * 882 * \sa amdgpu_cs_ctx_free() 883 * 884 */ 885 int amdgpu_cs_ctx_create2(amdgpu_device_handle dev, 886 uint32_t priority, 887 amdgpu_context_handle *context); 888 /** 889 * Create GPU execution Context 890 * 891 * Refer to amdgpu_cs_ctx_create2 for full documentation. This call 892 * is missing the priority parameter. 893 * 894 * \sa amdgpu_cs_ctx_create2() 895 * 896 */ 897 int amdgpu_cs_ctx_create(amdgpu_device_handle dev, 898 amdgpu_context_handle *context); 899 900 /** 901 * 902 * Destroy GPU execution context when not needed any more 903 * 904 * \param context - \c [in] GPU Context handle 905 * 906 * \return 0 on success\n 907 * <0 - Negative POSIX Error code 908 * 909 * \sa amdgpu_cs_ctx_create() 910 * 911 */ 912 int amdgpu_cs_ctx_free(amdgpu_context_handle context); 913 914 /** 915 * Override the submission priority for the given context using a master fd. 916 * 917 * \param dev - \c [in] device handle 918 * \param context - \c [in] context handle for context id 919 * \param master_fd - \c [in] The master fd to authorize the override. 920 * \param priority - \c [in] The priority to assign to the context. 921 * 922 * \return 0 on success or a a negative Posix error code on failure. 923 */ 924 int amdgpu_cs_ctx_override_priority(amdgpu_device_handle dev, 925 amdgpu_context_handle context, 926 int master_fd, 927 unsigned priority); 928 929 /** 930 * Query reset state for the specific GPU Context 931 * 932 * \param context - \c [in] GPU Context handle 933 * \param state - \c [out] One of AMDGPU_CTX_*_RESET 934 * \param hangs - \c [out] Number of hangs caused by the context. 935 * 936 * \return 0 on success\n 937 * <0 - Negative POSIX Error code 938 * 939 * \sa amdgpu_cs_ctx_create() 940 * 941 */ 942 int amdgpu_cs_query_reset_state(amdgpu_context_handle context, 943 uint32_t *state, uint32_t *hangs); 944 945 /** 946 * Query reset state for the specific GPU Context. 947 * 948 * \param context - \c [in] GPU Context handle 949 * \param flags - \c [out] A combination of AMDGPU_CTX_QUERY2_FLAGS_* 950 * 951 * \return 0 on success\n 952 * <0 - Negative POSIX Error code 953 * 954 * \sa amdgpu_cs_ctx_create() 955 * 956 */ 957 int amdgpu_cs_query_reset_state2(amdgpu_context_handle context, 958 uint64_t *flags); 959 960 /* 961 * Command Buffers Management 962 * 963 */ 964 965 /** 966 * Send request to submit command buffers to hardware. 967 * 968 * Kernel driver could use GPU Scheduler to make decision when physically 969 * sent this request to the hardware. Accordingly this request could be put 970 * in queue and sent for execution later. The only guarantee is that request 971 * from the same GPU context to the same ip:ip_instance:ring will be executed in 972 * order. 973 * 974 * The caller can specify the user fence buffer/location with the fence_info in the 975 * cs_request.The sequence number is returned via the 'seq_no' parameter 976 * in ibs_request structure. 977 * 978 * 979 * \param dev - \c [in] Device handle. 980 * See #amdgpu_device_initialize() 981 * \param context - \c [in] GPU Context 982 * \param flags - \c [in] Global submission flags 983 * \param ibs_request - \c [in/out] Pointer to submission requests. 984 * We could submit to the several 985 * engines/rings simulteniously as 986 * 'atomic' operation 987 * \param number_of_requests - \c [in] Number of submission requests 988 * 989 * \return 0 on success\n 990 * <0 - Negative POSIX Error code 991 * 992 * \note It is required to pass correct resource list with buffer handles 993 * which will be accessible by command buffers from submission 994 * This will allow kernel driver to correctly implement "paging". 995 * Failure to do so will have unpredictable results. 996 * 997 * \sa amdgpu_command_buffer_alloc(), amdgpu_command_buffer_free(), 998 * amdgpu_cs_query_fence_status() 999 * 1000 */ 1001 int amdgpu_cs_submit(amdgpu_context_handle context, 1002 uint64_t flags, 1003 struct amdgpu_cs_request *ibs_request, 1004 uint32_t number_of_requests); 1005 1006 /** 1007 * Query status of Command Buffer Submission 1008 * 1009 * \param fence - \c [in] Structure describing fence to query 1010 * \param timeout_ns - \c [in] Timeout value to wait 1011 * \param flags - \c [in] Flags for the query 1012 * \param expired - \c [out] If fence expired or not.\n 1013 * 0 – if fence is not expired\n 1014 * !0 - otherwise 1015 * 1016 * \return 0 on success\n 1017 * <0 - Negative POSIX Error code 1018 * 1019 * \note If UMD wants only to check operation status and returned immediately 1020 * then timeout value as 0 must be passed. In this case success will be 1021 * returned in the case if submission was completed or timeout error 1022 * code. 1023 * 1024 * \sa amdgpu_cs_submit() 1025 */ 1026 int amdgpu_cs_query_fence_status(struct amdgpu_cs_fence *fence, 1027 uint64_t timeout_ns, 1028 uint64_t flags, 1029 uint32_t *expired); 1030 1031 /** 1032 * Wait for multiple fences 1033 * 1034 * \param fences - \c [in] The fence array to wait 1035 * \param fence_count - \c [in] The fence count 1036 * \param wait_all - \c [in] If true, wait all fences to be signaled, 1037 * otherwise, wait at least one fence 1038 * \param timeout_ns - \c [in] The timeout to wait, in nanoseconds 1039 * \param status - \c [out] '1' for signaled, '0' for timeout 1040 * \param first - \c [out] the index of the first signaled fence from @fences 1041 * 1042 * \return 0 on success 1043 * <0 - Negative POSIX Error code 1044 * 1045 * \note Currently it supports only one amdgpu_device. All fences come from 1046 * the same amdgpu_device with the same fd. 1047 */ 1048 int amdgpu_cs_wait_fences(struct amdgpu_cs_fence *fences, 1049 uint32_t fence_count, 1050 bool wait_all, 1051 uint64_t timeout_ns, 1052 uint32_t *status, uint32_t *first); 1053 1054 /* 1055 * Query / Info API 1056 * 1057 */ 1058 1059 /** 1060 * Query allocation size alignments 1061 * 1062 * UMD should query information about GPU VM MC size alignments requirements 1063 * to be able correctly choose required allocation size and implement 1064 * internal optimization if needed. 1065 * 1066 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() 1067 * \param info - \c [out] Pointer to structure to get size alignment 1068 * requirements 1069 * 1070 * \return 0 on success\n 1071 * <0 - Negative POSIX Error code 1072 * 1073 */ 1074 int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev, 1075 struct amdgpu_buffer_size_alignments 1076 *info); 1077 1078 /** 1079 * Query firmware versions 1080 * 1081 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() 1082 * \param fw_type - \c [in] AMDGPU_INFO_FW_* 1083 * \param ip_instance - \c [in] Index of the IP block of the same type. 1084 * \param index - \c [in] Index of the engine. (for SDMA and MEC) 1085 * \param version - \c [out] Pointer to to the "version" return value 1086 * \param feature - \c [out] Pointer to to the "feature" return value 1087 * 1088 * \return 0 on success\n 1089 * <0 - Negative POSIX Error code 1090 * 1091 */ 1092 int amdgpu_query_firmware_version(amdgpu_device_handle dev, unsigned fw_type, 1093 unsigned ip_instance, unsigned index, 1094 uint32_t *version, uint32_t *feature); 1095 1096 /** 1097 * Query the number of HW IP instances of a certain type. 1098 * 1099 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() 1100 * \param type - \c [in] Hardware IP block type = AMDGPU_HW_IP_* 1101 * \param count - \c [out] Pointer to structure to get information 1102 * 1103 * \return 0 on success\n 1104 * <0 - Negative POSIX Error code 1105 */ 1106 int amdgpu_query_hw_ip_count(amdgpu_device_handle dev, unsigned type, 1107 uint32_t *count); 1108 1109 /** 1110 * Query engine information 1111 * 1112 * This query allows UMD to query information different engines and their 1113 * capabilities. 1114 * 1115 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() 1116 * \param type - \c [in] Hardware IP block type = AMDGPU_HW_IP_* 1117 * \param ip_instance - \c [in] Index of the IP block of the same type. 1118 * \param info - \c [out] Pointer to structure to get information 1119 * 1120 * \return 0 on success\n 1121 * <0 - Negative POSIX Error code 1122 */ 1123 int amdgpu_query_hw_ip_info(amdgpu_device_handle dev, unsigned type, 1124 unsigned ip_instance, 1125 struct drm_amdgpu_info_hw_ip *info); 1126 1127 /** 1128 * Query heap information 1129 * 1130 * This query allows UMD to query potentially available memory resources and 1131 * adjust their logic if necessary. 1132 * 1133 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() 1134 * \param heap - \c [in] Heap type 1135 * \param info - \c [in] Pointer to structure to get needed information 1136 * 1137 * \return 0 on success\n 1138 * <0 - Negative POSIX Error code 1139 * 1140 */ 1141 int amdgpu_query_heap_info(amdgpu_device_handle dev, uint32_t heap, 1142 uint32_t flags, struct amdgpu_heap_info *info); 1143 1144 /** 1145 * Get the CRTC ID from the mode object ID 1146 * 1147 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() 1148 * \param id - \c [in] Mode object ID 1149 * \param result - \c [in] Pointer to the CRTC ID 1150 * 1151 * \return 0 on success\n 1152 * <0 - Negative POSIX Error code 1153 * 1154 */ 1155 int amdgpu_query_crtc_from_id(amdgpu_device_handle dev, unsigned id, 1156 int32_t *result); 1157 1158 /** 1159 * Query GPU H/w Info 1160 * 1161 * Query hardware specific information 1162 * 1163 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() 1164 * \param heap - \c [in] Heap type 1165 * \param info - \c [in] Pointer to structure to get needed information 1166 * 1167 * \return 0 on success\n 1168 * <0 - Negative POSIX Error code 1169 * 1170 */ 1171 int amdgpu_query_gpu_info(amdgpu_device_handle dev, 1172 struct amdgpu_gpu_info *info); 1173 1174 /** 1175 * Query hardware or driver information. 1176 * 1177 * The return size is query-specific and depends on the "info_id" parameter. 1178 * No more than "size" bytes is returned. 1179 * 1180 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() 1181 * \param info_id - \c [in] AMDGPU_INFO_* 1182 * \param size - \c [in] Size of the returned value. 1183 * \param value - \c [out] Pointer to the return value. 1184 * 1185 * \return 0 on success\n 1186 * <0 - Negative POSIX error code 1187 * 1188 */ 1189 int amdgpu_query_info(amdgpu_device_handle dev, unsigned info_id, 1190 unsigned size, void *value); 1191 1192 /** 1193 * Query hardware or driver information. 1194 * 1195 * The return size is query-specific and depends on the "info_id" parameter. 1196 * No more than "size" bytes is returned. 1197 * 1198 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() 1199 * \param info - \c [in] amdgpu_sw_info_* 1200 * \param value - \c [out] Pointer to the return value. 1201 * 1202 * \return 0 on success\n 1203 * <0 - Negative POSIX error code 1204 * 1205 */ 1206 int amdgpu_query_sw_info(amdgpu_device_handle dev, enum amdgpu_sw_info info, 1207 void *value); 1208 1209 /** 1210 * Query information about GDS 1211 * 1212 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() 1213 * \param gds_info - \c [out] Pointer to structure to get GDS information 1214 * 1215 * \return 0 on success\n 1216 * <0 - Negative POSIX Error code 1217 * 1218 */ 1219 int amdgpu_query_gds_info(amdgpu_device_handle dev, 1220 struct amdgpu_gds_resource_info *gds_info); 1221 1222 /** 1223 * Query information about sensor. 1224 * 1225 * The return size is query-specific and depends on the "sensor_type" 1226 * parameter. No more than "size" bytes is returned. 1227 * 1228 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() 1229 * \param sensor_type - \c [in] AMDGPU_INFO_SENSOR_* 1230 * \param size - \c [in] Size of the returned value. 1231 * \param value - \c [out] Pointer to the return value. 1232 * 1233 * \return 0 on success\n 1234 * <0 - Negative POSIX Error code 1235 * 1236 */ 1237 int amdgpu_query_sensor_info(amdgpu_device_handle dev, unsigned sensor_type, 1238 unsigned size, void *value); 1239 1240 /** 1241 * Read a set of consecutive memory-mapped registers. 1242 * Not all registers are allowed to be read by userspace. 1243 * 1244 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize( 1245 * \param dword_offset - \c [in] Register offset in dwords 1246 * \param count - \c [in] The number of registers to read starting 1247 * from the offset 1248 * \param instance - \c [in] GRBM_GFX_INDEX selector. It may have other 1249 * uses. Set it to 0xffffffff if unsure. 1250 * \param flags - \c [in] Flags with additional information. 1251 * \param values - \c [out] The pointer to return values. 1252 * 1253 * \return 0 on success\n 1254 * <0 - Negative POSIX error code 1255 * 1256 */ 1257 int amdgpu_read_mm_registers(amdgpu_device_handle dev, unsigned dword_offset, 1258 unsigned count, uint32_t instance, uint32_t flags, 1259 uint32_t *values); 1260 1261 /** 1262 * Flag to request VA address range in the 32bit address space 1263 */ 1264 #define AMDGPU_VA_RANGE_32_BIT 0x1 1265 #define AMDGPU_VA_RANGE_HIGH 0x2 1266 1267 /** 1268 * Allocate virtual address range 1269 * 1270 * \param dev - [in] Device handle. See #amdgpu_device_initialize() 1271 * \param va_range_type - \c [in] Type of MC va range from which to allocate 1272 * \param size - \c [in] Size of range. Size must be correctly* aligned. 1273 * It is client responsibility to correctly aligned size based on the future 1274 * usage of allocated range. 1275 * \param va_base_alignment - \c [in] Overwrite base address alignment 1276 * requirement for GPU VM MC virtual 1277 * address assignment. Must be multiple of size alignments received as 1278 * 'amdgpu_buffer_size_alignments'. 1279 * If 0 use the default one. 1280 * \param va_base_required - \c [in] Specified required va base address. 1281 * If 0 then library choose available one. 1282 * If !0 value will be passed and those value already "in use" then 1283 * corresponding error status will be returned. 1284 * \param va_base_allocated - \c [out] On return: Allocated VA base to be used 1285 * by client. 1286 * \param va_range_handle - \c [out] On return: Handle assigned to allocation 1287 * \param flags - \c [in] flags for special VA range 1288 * 1289 * \return 0 on success\n 1290 * >0 - AMD specific error code\n 1291 * <0 - Negative POSIX Error code 1292 * 1293 * \notes \n 1294 * It is client responsibility to correctly handle VA assignments and usage. 1295 * Neither kernel driver nor libdrm_amdpgu are able to prevent and 1296 * detect wrong va assignment. 1297 * 1298 * It is client responsibility to correctly handle multi-GPU cases and to pass 1299 * the corresponding arrays of all devices handles where corresponding VA will 1300 * be used. 1301 * 1302 */ 1303 int amdgpu_va_range_alloc(amdgpu_device_handle dev, 1304 enum amdgpu_gpu_va_range va_range_type, 1305 uint64_t size, 1306 uint64_t va_base_alignment, 1307 uint64_t va_base_required, 1308 uint64_t *va_base_allocated, 1309 amdgpu_va_handle *va_range_handle, 1310 uint64_t flags); 1311 1312 /** 1313 * Free previously allocated virtual address range 1314 * 1315 * 1316 * \param va_range_handle - \c [in] Handle assigned to VA allocation 1317 * 1318 * \return 0 on success\n 1319 * >0 - AMD specific error code\n 1320 * <0 - Negative POSIX Error code 1321 * 1322 */ 1323 int amdgpu_va_range_free(amdgpu_va_handle va_range_handle); 1324 1325 /** 1326 * Query virtual address range 1327 * 1328 * UMD can query GPU VM range supported by each device 1329 * to initialize its own VAM accordingly. 1330 * 1331 * \param dev - [in] Device handle. See #amdgpu_device_initialize() 1332 * \param type - \c [in] Type of virtual address range 1333 * \param offset - \c [out] Start offset of virtual address range 1334 * \param size - \c [out] Size of virtual address range 1335 * 1336 * \return 0 on success\n 1337 * <0 - Negative POSIX Error code 1338 * 1339 */ 1340 1341 int amdgpu_va_range_query(amdgpu_device_handle dev, 1342 enum amdgpu_gpu_va_range type, 1343 uint64_t *start, 1344 uint64_t *end); 1345 1346 /** 1347 * VA mapping/unmapping for the buffer object 1348 * 1349 * \param bo - \c [in] BO handle 1350 * \param offset - \c [in] Start offset to map 1351 * \param size - \c [in] Size to map 1352 * \param addr - \c [in] Start virtual address. 1353 * \param flags - \c [in] Supported flags for mapping/unmapping 1354 * \param ops - \c [in] AMDGPU_VA_OP_MAP or AMDGPU_VA_OP_UNMAP 1355 * 1356 * \return 0 on success\n 1357 * <0 - Negative POSIX Error code 1358 * 1359 */ 1360 1361 int amdgpu_bo_va_op(amdgpu_bo_handle bo, 1362 uint64_t offset, 1363 uint64_t size, 1364 uint64_t addr, 1365 uint64_t flags, 1366 uint32_t ops); 1367 1368 /** 1369 * VA mapping/unmapping for a buffer object or PRT region. 1370 * 1371 * This is not a simple drop-in extension for amdgpu_bo_va_op; instead, all 1372 * parameters are treated "raw", i.e. size is not automatically aligned, and 1373 * all flags must be specified explicitly. 1374 * 1375 * \param dev - \c [in] device handle 1376 * \param bo - \c [in] BO handle (may be NULL) 1377 * \param offset - \c [in] Start offset to map 1378 * \param size - \c [in] Size to map 1379 * \param addr - \c [in] Start virtual address. 1380 * \param flags - \c [in] Supported flags for mapping/unmapping 1381 * \param ops - \c [in] AMDGPU_VA_OP_MAP or AMDGPU_VA_OP_UNMAP 1382 * 1383 * \return 0 on success\n 1384 * <0 - Negative POSIX Error code 1385 * 1386 */ 1387 1388 int amdgpu_bo_va_op_raw(amdgpu_device_handle dev, 1389 amdgpu_bo_handle bo, 1390 uint64_t offset, 1391 uint64_t size, 1392 uint64_t addr, 1393 uint64_t flags, 1394 uint32_t ops); 1395 1396 /** 1397 * create semaphore 1398 * 1399 * \param sem - \c [out] semaphore handle 1400 * 1401 * \return 0 on success\n 1402 * <0 - Negative POSIX Error code 1403 * 1404 */ 1405 int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem); 1406 1407 /** 1408 * signal semaphore 1409 * 1410 * \param context - \c [in] GPU Context 1411 * \param ip_type - \c [in] Hardware IP block type = AMDGPU_HW_IP_* 1412 * \param ip_instance - \c [in] Index of the IP block of the same type 1413 * \param ring - \c [in] Specify ring index of the IP 1414 * \param sem - \c [in] semaphore handle 1415 * 1416 * \return 0 on success\n 1417 * <0 - Negative POSIX Error code 1418 * 1419 */ 1420 int amdgpu_cs_signal_semaphore(amdgpu_context_handle ctx, 1421 uint32_t ip_type, 1422 uint32_t ip_instance, 1423 uint32_t ring, 1424 amdgpu_semaphore_handle sem); 1425 1426 /** 1427 * wait semaphore 1428 * 1429 * \param context - \c [in] GPU Context 1430 * \param ip_type - \c [in] Hardware IP block type = AMDGPU_HW_IP_* 1431 * \param ip_instance - \c [in] Index of the IP block of the same type 1432 * \param ring - \c [in] Specify ring index of the IP 1433 * \param sem - \c [in] semaphore handle 1434 * 1435 * \return 0 on success\n 1436 * <0 - Negative POSIX Error code 1437 * 1438 */ 1439 int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx, 1440 uint32_t ip_type, 1441 uint32_t ip_instance, 1442 uint32_t ring, 1443 amdgpu_semaphore_handle sem); 1444 1445 /** 1446 * destroy semaphore 1447 * 1448 * \param sem - \c [in] semaphore handle 1449 * 1450 * \return 0 on success\n 1451 * <0 - Negative POSIX Error code 1452 * 1453 */ 1454 int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem); 1455 1456 /** 1457 * Get the ASIC marketing name 1458 * 1459 * \param dev - \c [in] Device handle. See #amdgpu_device_initialize() 1460 * 1461 * \return the constant string of the marketing name 1462 * "NULL" means the ASIC is not found 1463 */ 1464 const char *amdgpu_get_marketing_name(amdgpu_device_handle dev); 1465 1466 /** 1467 * Create kernel sync object 1468 * 1469 * \param dev - \c [in] device handle 1470 * \param flags - \c [in] flags that affect creation 1471 * \param syncobj - \c [out] sync object handle 1472 * 1473 * \return 0 on success\n 1474 * <0 - Negative POSIX Error code 1475 * 1476 */ 1477 int amdgpu_cs_create_syncobj2(amdgpu_device_handle dev, 1478 uint32_t flags, 1479 uint32_t *syncobj); 1480 1481 /** 1482 * Create kernel sync object 1483 * 1484 * \param dev - \c [in] device handle 1485 * \param syncobj - \c [out] sync object handle 1486 * 1487 * \return 0 on success\n 1488 * <0 - Negative POSIX Error code 1489 * 1490 */ 1491 int amdgpu_cs_create_syncobj(amdgpu_device_handle dev, 1492 uint32_t *syncobj); 1493 /** 1494 * Destroy kernel sync object 1495 * 1496 * \param dev - \c [in] device handle 1497 * \param syncobj - \c [in] sync object handle 1498 * 1499 * \return 0 on success\n 1500 * <0 - Negative POSIX Error code 1501 * 1502 */ 1503 int amdgpu_cs_destroy_syncobj(amdgpu_device_handle dev, 1504 uint32_t syncobj); 1505 1506 /** 1507 * Reset kernel sync objects to unsignalled state. 1508 * 1509 * \param dev - \c [in] device handle 1510 * \param syncobjs - \c [in] array of sync object handles 1511 * \param syncobj_count - \c [in] number of handles in syncobjs 1512 * 1513 * \return 0 on success\n 1514 * <0 - Negative POSIX Error code 1515 * 1516 */ 1517 int amdgpu_cs_syncobj_reset(amdgpu_device_handle dev, 1518 const uint32_t *syncobjs, uint32_t syncobj_count); 1519 1520 /** 1521 * Signal kernel sync objects. 1522 * 1523 * \param dev - \c [in] device handle 1524 * \param syncobjs - \c [in] array of sync object handles 1525 * \param syncobj_count - \c [in] number of handles in syncobjs 1526 * 1527 * \return 0 on success\n 1528 * <0 - Negative POSIX Error code 1529 * 1530 */ 1531 int amdgpu_cs_syncobj_signal(amdgpu_device_handle dev, 1532 const uint32_t *syncobjs, uint32_t syncobj_count); 1533 1534 /** 1535 * Signal kernel timeline sync objects. 1536 * 1537 * \param dev - \c [in] device handle 1538 * \param syncobjs - \c [in] array of sync object handles 1539 * \param points - \c [in] array of timeline points 1540 * \param syncobj_count - \c [in] number of handles in syncobjs 1541 * 1542 * \return 0 on success\n 1543 * <0 - Negative POSIX Error code 1544 * 1545 */ 1546 int amdgpu_cs_syncobj_timeline_signal(amdgpu_device_handle dev, 1547 const uint32_t *syncobjs, 1548 uint64_t *points, 1549 uint32_t syncobj_count); 1550 1551 /** 1552 * Wait for one or all sync objects to signal. 1553 * 1554 * \param dev - \c [in] self-explanatory 1555 * \param handles - \c [in] array of sync object handles 1556 * \param num_handles - \c [in] self-explanatory 1557 * \param timeout_nsec - \c [in] self-explanatory 1558 * \param flags - \c [in] a bitmask of DRM_SYNCOBJ_WAIT_FLAGS_* 1559 * \param first_signaled - \c [in] self-explanatory 1560 * 1561 * \return 0 on success\n 1562 * -ETIME - Timeout 1563 * <0 - Negative POSIX Error code 1564 * 1565 */ 1566 int amdgpu_cs_syncobj_wait(amdgpu_device_handle dev, 1567 uint32_t *handles, unsigned num_handles, 1568 int64_t timeout_nsec, unsigned flags, 1569 uint32_t *first_signaled); 1570 1571 /** 1572 * Wait for one or all sync objects on their points to signal. 1573 * 1574 * \param dev - \c [in] self-explanatory 1575 * \param handles - \c [in] array of sync object handles 1576 * \param points - \c [in] array of sync points to wait 1577 * \param num_handles - \c [in] self-explanatory 1578 * \param timeout_nsec - \c [in] self-explanatory 1579 * \param flags - \c [in] a bitmask of DRM_SYNCOBJ_WAIT_FLAGS_* 1580 * \param first_signaled - \c [in] self-explanatory 1581 * 1582 * \return 0 on success\n 1583 * -ETIME - Timeout 1584 * <0 - Negative POSIX Error code 1585 * 1586 */ 1587 int amdgpu_cs_syncobj_timeline_wait(amdgpu_device_handle dev, 1588 uint32_t *handles, uint64_t *points, 1589 unsigned num_handles, 1590 int64_t timeout_nsec, unsigned flags, 1591 uint32_t *first_signaled); 1592 /** 1593 * Query sync objects payloads. 1594 * 1595 * \param dev - \c [in] self-explanatory 1596 * \param handles - \c [in] array of sync object handles 1597 * \param points - \c [out] array of sync points returned, which presents 1598 * syncobj payload. 1599 * \param num_handles - \c [in] self-explanatory 1600 * 1601 * \return 0 on success\n 1602 * -ETIME - Timeout 1603 * <0 - Negative POSIX Error code 1604 * 1605 */ 1606 int amdgpu_cs_syncobj_query(amdgpu_device_handle dev, 1607 uint32_t *handles, uint64_t *points, 1608 unsigned num_handles); 1609 /** 1610 * Query sync objects last signaled or submitted point. 1611 * 1612 * \param dev - \c [in] self-explanatory 1613 * \param handles - \c [in] array of sync object handles 1614 * \param points - \c [out] array of sync points returned, which presents 1615 * syncobj payload. 1616 * \param num_handles - \c [in] self-explanatory 1617 * \param flags - \c [in] a bitmask of DRM_SYNCOBJ_QUERY_FLAGS_* 1618 * 1619 * \return 0 on success\n 1620 * -ETIME - Timeout 1621 * <0 - Negative POSIX Error code 1622 * 1623 */ 1624 int amdgpu_cs_syncobj_query2(amdgpu_device_handle dev, 1625 uint32_t *handles, uint64_t *points, 1626 unsigned num_handles, uint32_t flags); 1627 1628 /** 1629 * Export kernel sync object to shareable fd. 1630 * 1631 * \param dev - \c [in] device handle 1632 * \param syncobj - \c [in] sync object handle 1633 * \param shared_fd - \c [out] shared file descriptor. 1634 * 1635 * \return 0 on success\n 1636 * <0 - Negative POSIX Error code 1637 * 1638 */ 1639 int amdgpu_cs_export_syncobj(amdgpu_device_handle dev, 1640 uint32_t syncobj, 1641 int *shared_fd); 1642 /** 1643 * Import kernel sync object from shareable fd. 1644 * 1645 * \param dev - \c [in] device handle 1646 * \param shared_fd - \c [in] shared file descriptor. 1647 * \param syncobj - \c [out] sync object handle 1648 * 1649 * \return 0 on success\n 1650 * <0 - Negative POSIX Error code 1651 * 1652 */ 1653 int amdgpu_cs_import_syncobj(amdgpu_device_handle dev, 1654 int shared_fd, 1655 uint32_t *syncobj); 1656 1657 /** 1658 * Export kernel sync object to a sync_file. 1659 * 1660 * \param dev - \c [in] device handle 1661 * \param syncobj - \c [in] sync object handle 1662 * \param sync_file_fd - \c [out] sync_file file descriptor. 1663 * 1664 * \return 0 on success\n 1665 * <0 - Negative POSIX Error code 1666 * 1667 */ 1668 int amdgpu_cs_syncobj_export_sync_file(amdgpu_device_handle dev, 1669 uint32_t syncobj, 1670 int *sync_file_fd); 1671 1672 /** 1673 * Import kernel sync object from a sync_file. 1674 * 1675 * \param dev - \c [in] device handle 1676 * \param syncobj - \c [in] sync object handle 1677 * \param sync_file_fd - \c [in] sync_file file descriptor. 1678 * 1679 * \return 0 on success\n 1680 * <0 - Negative POSIX Error code 1681 * 1682 */ 1683 int amdgpu_cs_syncobj_import_sync_file(amdgpu_device_handle dev, 1684 uint32_t syncobj, 1685 int sync_file_fd); 1686 /** 1687 * Export kernel timeline sync object to a sync_file. 1688 * 1689 * \param dev - \c [in] device handle 1690 * \param syncobj - \c [in] sync object handle 1691 * \param point - \c [in] timeline point 1692 * \param flags - \c [in] flags 1693 * \param sync_file_fd - \c [out] sync_file file descriptor. 1694 * 1695 * \return 0 on success\n 1696 * <0 - Negative POSIX Error code 1697 * 1698 */ 1699 int amdgpu_cs_syncobj_export_sync_file2(amdgpu_device_handle dev, 1700 uint32_t syncobj, 1701 uint64_t point, 1702 uint32_t flags, 1703 int *sync_file_fd); 1704 1705 /** 1706 * Import kernel timeline sync object from a sync_file. 1707 * 1708 * \param dev - \c [in] device handle 1709 * \param syncobj - \c [in] sync object handle 1710 * \param point - \c [in] timeline point 1711 * \param sync_file_fd - \c [in] sync_file file descriptor. 1712 * 1713 * \return 0 on success\n 1714 * <0 - Negative POSIX Error code 1715 * 1716 */ 1717 int amdgpu_cs_syncobj_import_sync_file2(amdgpu_device_handle dev, 1718 uint32_t syncobj, 1719 uint64_t point, 1720 int sync_file_fd); 1721 1722 /** 1723 * transfer between syncbojs. 1724 * 1725 * \param dev - \c [in] device handle 1726 * \param dst_handle - \c [in] sync object handle 1727 * \param dst_point - \c [in] timeline point, 0 presents dst is binary 1728 * \param src_handle - \c [in] sync object handle 1729 * \param src_point - \c [in] timeline point, 0 presents src is binary 1730 * \param flags - \c [in] flags 1731 * 1732 * \return 0 on success\n 1733 * <0 - Negative POSIX Error code 1734 * 1735 */ 1736 int amdgpu_cs_syncobj_transfer(amdgpu_device_handle dev, 1737 uint32_t dst_handle, 1738 uint64_t dst_point, 1739 uint32_t src_handle, 1740 uint64_t src_point, 1741 uint32_t flags); 1742 1743 /** 1744 * Export an amdgpu fence as a handle (syncobj or fd). 1745 * 1746 * \param what AMDGPU_FENCE_TO_HANDLE_GET_{SYNCOBJ, FD} 1747 * \param out_handle returned handle 1748 * 1749 * \return 0 on success\n 1750 * <0 - Negative POSIX Error code 1751 */ 1752 int amdgpu_cs_fence_to_handle(amdgpu_device_handle dev, 1753 struct amdgpu_cs_fence *fence, 1754 uint32_t what, 1755 uint32_t *out_handle); 1756 1757 /** 1758 * Submit raw command submission to kernel 1759 * 1760 * \param dev - \c [in] device handle 1761 * \param context - \c [in] context handle for context id 1762 * \param bo_list_handle - \c [in] request bo list handle (0 for none) 1763 * \param num_chunks - \c [in] number of CS chunks to submit 1764 * \param chunks - \c [in] array of CS chunks 1765 * \param seq_no - \c [out] output sequence number for submission. 1766 * 1767 * \return 0 on success\n 1768 * <0 - Negative POSIX Error code 1769 * 1770 */ 1771 struct drm_amdgpu_cs_chunk; 1772 struct drm_amdgpu_cs_chunk_dep; 1773 struct drm_amdgpu_cs_chunk_data; 1774 1775 int amdgpu_cs_submit_raw(amdgpu_device_handle dev, 1776 amdgpu_context_handle context, 1777 amdgpu_bo_list_handle bo_list_handle, 1778 int num_chunks, 1779 struct drm_amdgpu_cs_chunk *chunks, 1780 uint64_t *seq_no); 1781 1782 /** 1783 * Submit raw command submission to the kernel with a raw BO list handle. 1784 * 1785 * \param dev - \c [in] device handle 1786 * \param context - \c [in] context handle for context id 1787 * \param bo_list_handle - \c [in] raw bo list handle (0 for none) 1788 * \param num_chunks - \c [in] number of CS chunks to submit 1789 * \param chunks - \c [in] array of CS chunks 1790 * \param seq_no - \c [out] output sequence number for submission. 1791 * 1792 * \return 0 on success\n 1793 * <0 - Negative POSIX Error code 1794 * 1795 * \sa amdgpu_bo_list_create_raw(), amdgpu_bo_list_destroy_raw() 1796 */ 1797 int amdgpu_cs_submit_raw2(amdgpu_device_handle dev, 1798 amdgpu_context_handle context, 1799 uint32_t bo_list_handle, 1800 int num_chunks, 1801 struct drm_amdgpu_cs_chunk *chunks, 1802 uint64_t *seq_no); 1803 1804 void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence, 1805 struct drm_amdgpu_cs_chunk_dep *dep); 1806 void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info *fence_info, 1807 struct drm_amdgpu_cs_chunk_data *data); 1808 1809 /** 1810 * Reserve VMID 1811 * \param context - \c [in] GPU Context 1812 * \param flags - \c [in] TBD 1813 * 1814 * \return 0 on success otherwise POSIX Error code 1815 */ 1816 int amdgpu_vm_reserve_vmid(amdgpu_device_handle dev, uint32_t flags); 1817 1818 /** 1819 * Free reserved VMID 1820 * \param context - \c [in] GPU Context 1821 * \param flags - \c [in] TBD 1822 * 1823 * \return 0 on success otherwise POSIX Error code 1824 */ 1825 int amdgpu_vm_unreserve_vmid(amdgpu_device_handle dev, uint32_t flags); 1826 1827 #ifdef __cplusplus 1828 } 1829 #endif 1830 #endif /* #ifdef _AMDGPU_H_ */ 1831