/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef __VMWGFX_DRM_H__
#define __VMWGFX_DRM_H__

#include "drm.h"

#define DRM_VMW_MAX_SURFACE_FACES 6
#define DRM_VMW_MAX_MIP_LEVELS 24


#define DRM_VMW_GET_PARAM            0
#define DRM_VMW_ALLOC_DMABUF         1
#define DRM_VMW_UNREF_DMABUF         2
#define DRM_VMW_CURSOR_BYPASS        3
/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0 */
#define DRM_VMW_CONTROL_STREAM       4
#define DRM_VMW_CLAIM_STREAM         5
#define DRM_VMW_UNREF_STREAM         6
/* guarded by DRM_VMW_PARAM_3D == 1 */
#define DRM_VMW_CREATE_CONTEXT       7
#define DRM_VMW_UNREF_CONTEXT        8
#define DRM_VMW_CREATE_SURFACE       9
#define DRM_VMW_UNREF_SURFACE        10
#define DRM_VMW_REF_SURFACE          11
#define DRM_VMW_EXECBUF              12
#define DRM_VMW_GET_3D_CAP           13
#define DRM_VMW_FENCE_WAIT           14
#define DRM_VMW_FENCE_SIGNALED       15
#define DRM_VMW_FENCE_UNREF          16
#define DRM_VMW_FENCE_EVENT          17
#define DRM_VMW_PRESENT              18
#define DRM_VMW_PRESENT_READBACK     19
#define DRM_VMW_UPDATE_LAYOUT        20
#define DRM_VMW_CREATE_SHADER        21
#define DRM_VMW_UNREF_SHADER         22
#define DRM_VMW_GB_SURFACE_CREATE    23
#define DRM_VMW_GB_SURFACE_REF       24
#define DRM_VMW_SYNCCPU              25
#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26

/*************************************************************************/
/**
 * DRM_VMW_GET_PARAM - get device information.
 *
 * DRM_VMW_PARAM_FIFO_OFFSET:
 * Offset to use to map the first page of the FIFO read-only.
 * The fifo is mapped using the mmap() system call on the drm device.
 *
 * DRM_VMW_PARAM_OVERLAY_IOCTL:
 * Does the driver support the overlay ioctl.
 */
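/*
 * Usage note (illustrative, not part of the ABI): parameters are queried
 * with the DRM_VMW_GET_PARAM ioctl and the struct drm_vmw_getparam_arg
 * defined below. A minimal user-space sketch, assuming a DRM fd opened by
 * the caller and the drmCommandWriteRead() helper from libdrm's xf86drm.h:
 *
 *	static int vmw_has_3d(int fd)	// hypothetical helper
 *	{
 *		struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_3D };
 *
 *		if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM,
 *					&arg, sizeof(arg)) != 0)
 *			return 0;
 *		return arg.value == 1;	// DRM_VMW_PARAM_3D == 1: 3D available
 *	}
 */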
#define DRM_VMW_PARAM_NUM_STREAMS      0
#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
#define DRM_VMW_PARAM_3D               2
#define DRM_VMW_PARAM_HW_CAPS          3
#define DRM_VMW_PARAM_FIFO_CAPS       4
#define DRM_VMW_PARAM_MAX_FB_SIZE      5
#define DRM_VMW_PARAM_FIFO_HW_VERSION  6
#define DRM_VMW_PARAM_MAX_SURF_MEMORY  7
#define DRM_VMW_PARAM_3D_CAPS_SIZE     8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY   9
#define DRM_VMW_PARAM_MAX_MOB_SIZE     10
#define DRM_VMW_PARAM_SCREEN_TARGET    11
#define DRM_VMW_PARAM_DX               12

/**
 * enum drm_vmw_handle_type - handle type for ref ioctls
 */
enum drm_vmw_handle_type {
	DRM_VMW_HANDLE_LEGACY = 0,
	DRM_VMW_HANDLE_PRIME = 1
};

/**
 * struct drm_vmw_getparam_arg
 *
 * @value: Returned value. //Out
 * @param: Parameter to query. //In.
 *
 * Argument to the DRM_VMW_GET_PARAM Ioctl.
 */

struct drm_vmw_getparam_arg {
	__u64 value;
	__u32 param;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_CREATE_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */

/**
 * struct drm_vmw_context_arg
 *
 * @cid: Device unique context ID.
 *
 * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
 * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
 */

struct drm_vmw_context_arg {
	__s32 cid;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_CONTEXT - Free a host context.
 *
 * Frees a global context id, and queues a destroy host command for the host.
 * Does not wait for host completion. The context ID can be used directly
 * in the command stream and shows up as the same context ID on the host.
 */

/*************************************************************************/
/**
 * DRM_VMW_CREATE_SURFACE - Create a host surface.
 *
 * Allocates a device unique surface id, and queues a create surface command
 * for the host. Does not wait for host completion. The surface ID can be
 * used directly in the command stream and shows up as the same surface
 * ID on the host.
 */

/**
 * struct drm_vmw_surface_create_req
 *
 * @flags: Surface flags as understood by the host.
 * @format: Surface format as understood by the host.
 * @mip_levels: Number of mip levels for each face.
 * An unused face should have 0 encoded.
 * @size_addr: Address of a user-space array of struct drm_vmw_size
 * cast to an __u64 for 32-64 bit compatibility.
 * The size of the array should equal the total number of mipmap levels.
 * @shareable: Boolean whether other clients (as identified by file descriptors)
 * may reference this surface.
 * @scanout: Boolean whether the surface is intended to be used as a
 * scanout.
 *
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 * Output data from the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_create_req {
	__u32 flags;
	__u32 format;
	__u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	__u64 size_addr;
	__s32 shareable;
	__s32 scanout;
};
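/*
 * Illustrative sketch (not part of the ABI): the @size_addr array holds one
 * struct drm_vmw_size (defined below) per mip level. This sketch assumes a
 * single 2D face with its levels ordered largest to smallest, halving each
 * level, and example base dimensions; @flags and @format are left to the
 * caller:
 *
 *	struct drm_vmw_surface_create_req req = {0};
 *	struct drm_vmw_size sizes[3];		// one entry per mip level
 *	__u32 i, w = 256, h = 256;		// example base dimensions
 *
 *	req.mip_levels[0] = 3;			// face 0: three mip levels,
 *						// other faces stay 0 (unused)
 *	for (i = 0; i < 3; ++i) {
 *		sizes[i].width = w >> i;
 *		sizes[i].height = h >> i;
 *		sizes[i].depth = 1;
 *		sizes[i].pad64 = 0;
 *	}
 *	req.size_addr = (__u64)(unsigned long)sizes;
 */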
/**
 * struct drm_vmw_surface_arg
 *
 * @sid: Surface id of created surface or surface to destroy or reference.
 * @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl.
 *
 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
 * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_arg {
	__s32 sid;
	enum drm_vmw_handle_type handle_type;
};

/**
 * struct drm_vmw_size
 *
 * @width: mip level width
 * @height: mip level height
 * @depth: mip level depth
 *
 * Description of a mip level.
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

struct drm_vmw_size {
	__u32 width;
	__u32 height;
	__u32 depth;
	__u32 pad64;
};

/**
 * union drm_vmw_surface_create_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

union drm_vmw_surface_create_arg {
	struct drm_vmw_surface_arg rep;
	struct drm_vmw_surface_create_req req;
};

/*************************************************************************/
/**
 * DRM_VMW_REF_SURFACE - Reference a host surface.
 *
 * Puts a reference on a host surface with a given sid, as previously
 * returned by the DRM_VMW_CREATE_SURFACE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface ID in the command
 * stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * in the DRM_VMW_CREATE_SURFACE ioctl.
 */

/**
 * union drm_vmw_surface_reference_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

union drm_vmw_surface_reference_arg {
	struct drm_vmw_surface_create_req rep;
	struct drm_vmw_surface_arg req;
};
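/*
 * Illustrative sketch (not part of the ABI): referencing a surface by sid,
 * assuming a DRM fd, a sid obtained elsewhere, and libdrm's
 * drmCommandWriteRead():
 *
 *	union drm_vmw_surface_reference_arg arg = {0};
 *
 *	arg.req.sid = sid;
 *	arg.req.handle_type = DRM_VMW_HANDLE_LEGACY;
 *	if (drmCommandWriteRead(fd, DRM_VMW_REF_SURFACE,
 *				&arg, sizeof(arg)) == 0) {
 *		// arg.rep now holds the drm_vmw_surface_create_req data
 *	}
 */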
/*************************************************************************/
/**
 * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
 *
 * Clear a reference previously put on a host surface.
 * When all references are gone, including the one implicitly placed
 * on creation, a destroy surface command will be queued for the host.
 * Does not wait for completion.
 */

/*************************************************************************/
/**
 * DRM_VMW_EXECBUF
 *
 * Submit a command buffer for execution on the host, and return a
 * fence seqno that, when signaled, indicates that the command buffer has
 * executed.
 */

/**
 * struct drm_vmw_execbuf_arg
 *
 * @commands: User-space address of a command buffer cast to an __u64.
 * @command_size: Size in bytes of the command buffer.
 * @throttle_us: Sleep until software is less than @throttle_us
 * microseconds ahead of hardware. The driver may round this value
 * to the nearest kernel tick.
 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
 * __u64.
 * @version: Allows expanding the execbuf ioctl parameters without breaking
 * backwards compatibility, since user-space will always tell the kernel
 * which version it uses.
 * @flags: Execbuf flags. None currently.
 * @context_handle: DX context handle to submit the command buffer to. Used
 * when @version is 2 or higher.
 *
 * Argument to the DRM_VMW_EXECBUF Ioctl.
 */

#define DRM_VMW_EXECBUF_VERSION 2

struct drm_vmw_execbuf_arg {
	__u64 commands;
	__u32 command_size;
	__u32 throttle_us;
	__u64 fence_rep;
	__u32 version;
	__u32 flags;
	__u32 context_handle;
	__u32 pad64;
};

/**
 * struct drm_vmw_fence_rep
 *
 * @handle: Fence object handle for fence associated with a command submission.
 * @mask: Fence flags relevant for this fence object.
 * @seqno: Fence sequence number in fifo. A fence object with a lower
 * seqno will signal the EXEC flag before a fence object with a higher
 * seqno. This can be used by user-space to avoid kernel calls to determine
 * whether a fence has signaled the EXEC flag. Note that @seqno will
 * wrap at 32-bit.
 * @passed_seqno: The highest seqno number processed by the hardware
 * so far. This can be used to mark user-space fence objects as signaled, and
 * to determine whether a fence seqno might be stale.
 * @error: This member should've been set to -EFAULT on submission.
 * The following actions should be taken on completion:
 * error == -EFAULT: Fence communication failed. The host is synchronized.
 * Use the last fence id read from the FIFO fence register.
 * error != 0 && error != -EFAULT:
 * Fence submission failed. The host is synchronized. Use the fence_seq member.
 * error == 0: All is OK; the host may not be synchronized.
 * Use the fence_seq member.
 *
 * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
 */

struct drm_vmw_fence_rep {
	__u32 handle;
	__u32 mask;
	__u32 seqno;
	__u32 passed_seqno;
	__u32 pad64;
	__s32 error;
};
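/*
 * Usage note (illustrative, not part of the ABI): submitting a command
 * buffer and retrieving the resulting fence. A minimal sketch, assuming a
 * DRM fd, a caller-built SVGA command buffer, and libdrm's
 * drmCommandWrite():
 *
 *	struct drm_vmw_fence_rep fence_rep = {0};
 *	struct drm_vmw_execbuf_arg arg = {0};
 *
 *	arg.commands = (__u64)(unsigned long)cmd_buf;	// caller-built commands
 *	arg.command_size = cmd_size;
 *	arg.fence_rep = (__u64)(unsigned long)&fence_rep;
 *	arg.version = DRM_VMW_EXECBUF_VERSION;
 *	if (drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg)) == 0 &&
 *	    fence_rep.error == 0) {
 *		// fence_rep.handle may now be passed to the fence ioctls
 *	}
 */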
/*************************************************************************/
/**
 * DRM_VMW_ALLOC_DMABUF
 *
 * Allocate a DMA buffer that is visible also to the host.
 * NOTE: The buffer is identified by a handle and an offset, which are
 * private to the guest, but usable in the command stream. The guest kernel
 * may translate these and patch up the command stream accordingly. In the
 * future, the offset may be zero at all times, or it may disappear from the
 * interface before it is fixed.
 *
 * The DMA buffer may stay user-space mapped in the guest at all times,
 * and is thus suitable for sub-allocation.
 *
 * DMA buffers are mapped using the mmap() syscall on the drm device.
 */

/**
 * struct drm_vmw_alloc_dmabuf_req
 *
 * @size: Required minimum size of the buffer.
 *
 * Input data to the DRM_VMW_ALLOC_DMABUF Ioctl.
 */

struct drm_vmw_alloc_dmabuf_req {
	__u32 size;
	__u32 pad64;
};

/**
 * struct drm_vmw_dmabuf_rep
 *
 * @map_handle: Offset to use in the mmap() call used to map the buffer.
 * @handle: Handle unique to this buffer. Used for unreferencing.
 * @cur_gmr_id: GMR id to use in the command stream when this buffer is
 * referenced. See note above.
 * @cur_gmr_offset: Offset to use in the command stream when this buffer is
 * referenced. See note above.
 *
 * Output data from the DRM_VMW_ALLOC_DMABUF Ioctl.
 */

struct drm_vmw_dmabuf_rep {
	__u64 map_handle;
	__u32 handle;
	__u32 cur_gmr_id;
	__u32 cur_gmr_offset;
	__u32 pad64;
};

/**
 * union drm_vmw_alloc_dmabuf_arg
 *
 * @req: Input data as described above.
 * @rep: Output data as described above.
 *
 * Argument to the DRM_VMW_ALLOC_DMABUF Ioctl.
 */

union drm_vmw_alloc_dmabuf_arg {
	struct drm_vmw_alloc_dmabuf_req req;
	struct drm_vmw_dmabuf_rep rep;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
 */

/**
 * struct drm_vmw_unref_dmabuf_arg
 *
 * @handle: Handle indicating what buffer to free. Obtained from the
 * DRM_VMW_ALLOC_DMABUF Ioctl.
 *
 * Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
 */

struct drm_vmw_unref_dmabuf_arg {
	__u32 handle;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
 *
 * This IOCTL controls the overlay units of the svga device.
 * The SVGA overlay units do not work like regular hardware units in
 * that they do not automatically read back the contents of the given dma
 * buffer. Instead they only read back for each call to this ioctl, and
 * at any point between this call being made and a following call that
 * either changes the buffer or disables the stream.
 */

/**
 * struct drm_vmw_rect
 *
 * Defines a rectangle. Used in the overlay ioctl to define
 * source and destination rectangle.
 */

struct drm_vmw_rect {
	__s32 x;
	__s32 y;
	__u32 w;
	__u32 h;
};

/**
 * struct drm_vmw_control_stream_arg
 *
 * @stream_id: Stream to control.
 * @enabled: If false all following arguments are ignored.
 * @handle: Handle to buffer for getting data from.
 * @format: Format of the overlay as understood by the host.
 * @width: Width of the overlay.
 * @height: Height of the overlay.
 * @size: Size of the overlay in bytes.
 * @pitch: Array of pitches, the last two are only used for YUV12 formats.
 * @offset: Offset from start of dma buffer to overlay.
 * @src: Source rect, must be within the defined area above.
 * @dst: Destination rect, x and y may be negative.
 *
 * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
 */

struct drm_vmw_control_stream_arg {
	__u32 stream_id;
	__u32 enabled;

	__u32 flags;
	__u32 color_key;

	__u32 handle;
	__u32 offset;
	__s32 format;
	__u32 size;
	__u32 width;
	__u32 height;
	__u32 pitch[3];

	__u32 pad64;
	struct drm_vmw_rect src;
	struct drm_vmw_rect dst;
};
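/*
 * Usage note (illustrative, not part of the ABI): a typical overlay flow is
 * DRM_VMW_CLAIM_STREAM, then DRM_VMW_CONTROL_STREAM to enable, then
 * DRM_VMW_UNREF_STREAM when done. A minimal enable sketch, assuming a
 * claimed stream id, an allocated DMA buffer handle, a caller-chosen
 * host-understood format and geometry, and libdrm's drmCommandWrite():
 *
 *	struct drm_vmw_control_stream_arg s = {0};
 *
 *	s.stream_id = stream_id;	// from DRM_VMW_CLAIM_STREAM
 *	s.enabled = 1;
 *	s.handle = buf_handle;		// from DRM_VMW_ALLOC_DMABUF
 *	s.format = format;		// host-understood overlay format
 *	s.width = 640;			// example geometry
 *	s.height = 480;
 *	s.size = frame_size;
 *	s.pitch[0] = pitch0;
 *	s.src = (struct drm_vmw_rect){ .x = 0, .y = 0, .w = 640, .h = 480 };
 *	s.dst = s.src;
 *	(void)drmCommandWrite(fd, DRM_VMW_CONTROL_STREAM, &s, sizeof(s));
 */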
/*************************************************************************/
/**
 * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
 */

#define DRM_VMW_CURSOR_BYPASS_ALL    (1 << 0)
#define DRM_VMW_CURSOR_BYPASS_FLAGS  (1)

/**
 * struct drm_vmw_cursor_bypass_arg
 *
 * @flags: Flags.
 * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
 * @xpos: X position of cursor.
 * @ypos: Y position of cursor.
 * @xhot: X hotspot.
 * @yhot: Y hotspot.
 *
 * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
 */

struct drm_vmw_cursor_bypass_arg {
	__u32 flags;
	__u32 crtc_id;
	__s32 xpos;
	__s32 ypos;
	__s32 xhot;
	__s32 yhot;
};

/*************************************************************************/
/**
 * DRM_VMW_CLAIM_STREAM - Claim a single stream.
 */

/**
 * struct drm_vmw_stream_arg
 *
 * @stream_id: Device unique stream ID.
 *
 * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
 * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
 */

struct drm_vmw_stream_arg {
	__u32 stream_id;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_STREAM - Unclaim a stream.
 *
 * Return a single stream that was claimed by this process. Also makes
 * sure that the stream has been stopped.
 */

/*************************************************************************/
/**
 * DRM_VMW_GET_3D_CAP
 *
 * Read 3D capabilities from the FIFO.
 */

/**
 * struct drm_vmw_get_3d_cap_arg
 *
 * @buffer: Pointer to a buffer for capability data, cast to an __u64.
 * @max_size: Max size to copy.
 *
 * Input argument to the DRM_VMW_GET_3D_CAP ioctl.
 */

struct drm_vmw_get_3d_cap_arg {
	__u64 buffer;
	__u32 max_size;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FENCE_WAIT
 *
 * Waits for a fence object to signal. The wait is interruptible, so that
 * signals may be delivered during the interrupt. The wait may time out,
 * in which case the call returns -EBUSY. If the wait is restarted,
 * that is, restarted without resetting @cookie_valid to zero,
 * the timeout is computed from the first call.
 *
 * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
 * on:
 * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command
 * stream have executed.
 * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish
 * commands in the buffer given to the EXECBUF ioctl returning the fence
 * object handle are available to user-space.
 *
 * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
 * fence wait ioctl returns 0, the fence object has been unreferenced after
 * the wait.
 */

#define DRM_VMW_FENCE_FLAG_EXEC   (1 << 0)
#define DRM_VMW_FENCE_FLAG_QUERY  (1 << 1)

#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)

/**
 * struct drm_vmw_fence_wait_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
 * @kernel_cookie: Set to 0 on first call. Left alone on restart.
 * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
 * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
 * before returning.
 * @flags: Fence flags to wait on.
 * @wait_options: Options that control the behaviour of the wait ioctl.
 *
 * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
 */

struct drm_vmw_fence_wait_arg {
	__u32 handle;
	__s32 cookie_valid;
	__u64 kernel_cookie;
	__u64 timeout_us;
	__s32 lazy;
	__s32 flags;
	__s32 wait_options;
	__s32 pad64;
};
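/*
 * Usage note (illustrative, not part of the ABI): waiting on a fence handle
 * returned through struct drm_vmw_fence_rep, and letting the kernel drop
 * the reference on success. A minimal sketch, assuming a DRM fd and
 * libdrm's drmCommandWriteRead():
 *
 *	struct drm_vmw_fence_wait_arg wait = {0};
 *
 *	wait.handle = fence_handle;	// from drm_vmw_fence_rep.handle
 *	wait.cookie_valid = 0;		// first call; left alone on restart
 *	wait.timeout_us = 1000000;	// example: one second
 *	wait.lazy = 1;			// timing not critical
 *	wait.flags = DRM_VMW_FENCE_FLAG_EXEC;
 *	wait.wait_options = DRM_VMW_WAIT_OPTION_UNREF;
 *	if (drmCommandWriteRead(fd, DRM_VMW_FENCE_WAIT,
 *				&wait, sizeof(wait)) == 0) {
 *		// commands ahead of the fence have executed; the fence
 *		// object has also been unreferenced by the kernel
 *	}
 */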
/*************************************************************************/
/**
 * DRM_VMW_FENCE_SIGNALED
 *
 * Checks if a fence object is signaled.
 */

/**
 * struct drm_vmw_fence_signaled_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @flags: Fence object flags input to DRM_VMW_FENCE_SIGNALED ioctl.
 * @signaled: Out: Whether the fence object is signaled.
 * @passed_seqno: Out: Highest seqno passed so far. Can be used to signal the
 * EXEC flag of user-space fence objects.
 * @signaled_flags: Out: Flags signaled.
 *
 * Input/Output argument to the DRM_VMW_FENCE_SIGNALED and DRM_VMW_FENCE_UNREF
 * ioctls.
 */

struct drm_vmw_fence_signaled_arg {
	__u32 handle;
	__u32 flags;
	__s32 signaled;
	__u32 passed_seqno;
	__u32 signaled_flags;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FENCE_UNREF
 *
 * Unreferences a fence object, and causes it to be destroyed if there are no
 * other references to it.
 */

/**
 * struct drm_vmw_fence_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 *
 * Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl.
 */

struct drm_vmw_fence_arg {
	__u32 handle;
	__u32 pad64;
};


/*************************************************************************/
/**
 * DRM_VMW_FENCE_EVENT
 *
 * Queues an event on a fence to be delivered on the drm character device
 * when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
 * Optionally the approximate time when the fence signaled is
 * given by the event.
 */

/*
 * The event type
 */
#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000

struct drm_vmw_event_fence {
	struct drm_event base;
	__u64 user_data;
	__u32 tv_sec;
	__u32 tv_usec;
};

/*
 * Flags that may be given to the command.
 */
/* Request fence signaled time on the event. */
#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)

/**
 * struct drm_vmw_fence_event_arg
 *
 * @fence_rep: Pointer to fence_rep structure cast to __u64 or 0 if
 * the fence is not supposed to be referenced by user-space.
 * @user_data: Info to be delivered with the event.
 * @handle: Attach the event to this fence only.
 * @flags: A set of flags as defined above.
 */
struct drm_vmw_fence_event_arg {
	__u64 fence_rep;
	__u64 user_data;
	__u32 handle;
	__u32 flags;
};


/*************************************************************************/
/**
 * DRM_VMW_PRESENT
 *
 * Executes an SVGA present on a given fb for a given surface. The surface
 * is placed on the framebuffer. Cliprects are given relative to the given
 * point (the point designated by dest_{x|y}).
 */

/**
 * struct drm_vmw_present_arg
 * @fb_id: framebuffer id to present / read back from.
 * @sid: Surface id to present from.
 * @dest_x: X placement coordinate for surface.
 * @dest_y: Y placement coordinate for surface.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @num_clips: Number of cliprects given relative to the framebuffer origin,
 * in the same coordinate space as the frame buffer.
 * @pad64: Unused 64-bit padding.
 *
 * Input argument to the DRM_VMW_PRESENT ioctl.
 */

struct drm_vmw_present_arg {
	__u32 fb_id;
	__u32 sid;
	__s32 dest_x;
	__s32 dest_y;
	__u64 clips_ptr;
	__u32 num_clips;
	__u32 pad64;
};
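/*
 * Illustrative sketch (not part of the ABI): presenting a surface to a
 * framebuffer with a single full-size cliprect, assuming a DRM fd, an fb
 * id, a surface id, caller-chosen dimensions, struct drm_vmw_rect clip
 * rectangles, and libdrm's drmCommandWrite():
 *
 *	struct drm_vmw_rect clip = { .x = 0, .y = 0, .w = width, .h = height };
 *	struct drm_vmw_present_arg p = {0};
 *
 *	p.fb_id = fb_id;
 *	p.sid = sid;
 *	p.dest_x = 0;
 *	p.dest_y = 0;
 *	p.clips_ptr = (__u64)(unsigned long)&clip;
 *	p.num_clips = 1;
 *	(void)drmCommandWrite(fd, DRM_VMW_PRESENT, &p, sizeof(p));
 */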
/*************************************************************************/
/**
 * DRM_VMW_PRESENT_READBACK
 *
 * Executes an SVGA present readback from a given fb to the dma buffer
 * currently bound as the fb. If there is no dma buffer bound to the fb,
 * an error will be returned.
 */

/**
 * struct drm_vmw_present_readback_arg
 * @fb_id: fb_id to present / read back from.
 * @num_clips: Number of cliprects.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an __u64.
 * If this member is NULL, then the ioctl should not return a fence.
 *
 * Input argument to the DRM_VMW_PRESENT_READBACK ioctl.
 */

struct drm_vmw_present_readback_arg {
	__u32 fb_id;
	__u32 num_clips;
	__u64 clips_ptr;
	__u64 fence_rep;
};

/*************************************************************************/
/**
 * DRM_VMW_UPDATE_LAYOUT - Update layout
 *
 * Updates the preferred modes and connection status for connectors. The
 * command consists of one drm_vmw_update_layout_arg pointing to an array
 * of num_outputs drm_vmw_rect's.
 */

/**
 * struct drm_vmw_update_layout_arg
 *
 * @num_outputs: number of active connectors
 * @rects: pointer to array of drm_vmw_rect cast to an __u64
 *
 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
 */
struct drm_vmw_update_layout_arg {
	__u32 num_outputs;
	__u32 pad64;
	__u64 rects;
};


/*************************************************************************/
/**
 * DRM_VMW_CREATE_SHADER - Create shader
 *
 * Creates a shader and optionally binds it to a dma buffer containing
 * the shader byte-code.
 */

/**
 * enum drm_vmw_shader_type - Shader types
 */
enum drm_vmw_shader_type {
	drm_vmw_shader_type_vs = 0,
	drm_vmw_shader_type_ps,
};


/**
 * struct drm_vmw_shader_create_arg
 *
 * @shader_type: Shader type of the shader to create.
 * @size: Size of the byte-code in bytes.
 * @buffer_handle: Buffer handle identifying the buffer containing the
 * shader byte-code.
 * @shader_handle: On successful completion contains a handle that
 * can be used to subsequently identify the shader.
 * @offset: Offset in bytes into the buffer given by @buffer_handle,
 * where the shader byte-code starts.
 *
 * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
 */
struct drm_vmw_shader_create_arg {
	enum drm_vmw_shader_type shader_type;
	__u32 size;
	__u32 buffer_handle;
	__u32 shader_handle;
	__u64 offset;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SHADER - Unreferences a shader
 *
 * Destroys a user-space reference to a shader, optionally destroying
 * it.
 */

/**
 * struct drm_vmw_shader_arg
 *
 * @handle: Handle identifying the shader to destroy.
 *
 * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
 */
struct drm_vmw_shader_arg {
	__u32 handle;
	__u32 pad64;
};
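/*
 * Usage note (illustrative, not part of the ABI): creating a shader from
 * byte-code previously copied into a DMA buffer. A minimal sketch, assuming
 * a DRM fd, a buffer handle holding the byte-code, and libdrm's
 * drmCommandWriteRead():
 *
 *	struct drm_vmw_shader_create_arg sh = {0};
 *
 *	sh.shader_type = drm_vmw_shader_type_vs;
 *	sh.size = bytecode_size;	// byte-code size in bytes
 *	sh.buffer_handle = buf_handle;	// from DRM_VMW_ALLOC_DMABUF
 *	sh.offset = 0;			// byte-code starts at buffer offset 0
 *	if (drmCommandWriteRead(fd, DRM_VMW_CREATE_SHADER,
 *				&sh, sizeof(sh)) == 0) {
 *		// sh.shader_handle now identifies the shader
 *	}
 */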
/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 */

/**
 * enum drm_vmw_surface_flags
 *
 * @drm_vmw_surface_flag_shareable: Whether the surface is shareable.
 * @drm_vmw_surface_flag_scanout: Whether the surface is a scanout
 * surface.
 * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
 * given.
 */
enum drm_vmw_surface_flags {
	drm_vmw_surface_flag_shareable = (1 << 0),
	drm_vmw_surface_flag_scanout = (1 << 1),
	drm_vmw_surface_flag_create_buffer = (1 << 2)
};

/**
 * struct drm_vmw_gb_surface_create_req
 *
 * @svga3d_flags: SVGA3d surface flags for the device.
 * @format: SVGA3d format.
 * @mip_levels: Number of mip levels for all faces.
 * @drm_surface_flags: Flags as described above.
 * @multisample_count: Future use. Set to 0.
 * @autogen_filter: Future use. Set to 0.
 * @buffer_handle: Buffer handle of backup buffer. SVGA3D_INVALID_ID
 * if none.
 * @base_size: Size of the base mip level for all faces.
 * @array_size: Must be zero for non-DX hardware, and if non-zero
 * svga3d_flags must have proper bind flags set up.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
struct drm_vmw_gb_surface_create_req {
	__u32 svga3d_flags;
	__u32 format;
	__u32 mip_levels;
	enum drm_vmw_surface_flags drm_surface_flags;
	__u32 multisample_count;
	__u32 autogen_filter;
	__u32 buffer_handle;
	__u32 array_size;
	struct drm_vmw_size base_size;
};

/**
 * struct drm_vmw_gb_surface_create_rep
 *
 * @handle: Surface handle.
 * @backup_size: Size of backup buffers for this surface.
 * @buffer_handle: Handle of backup buffer. SVGA3D_INVALID_ID if none.
 * @buffer_size: Actual size of the buffer identified by
 * @buffer_handle.
 * @buffer_map_handle: Offset into device address space for the buffer
 * identified by @buffer_handle.
 *
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
struct drm_vmw_gb_surface_create_rep {
	__u32 handle;
	__u32 backup_size;
	__u32 buffer_handle;
	__u32 buffer_size;
	__u64 buffer_map_handle;
};

/**
 * union drm_vmw_gb_surface_create_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
union drm_vmw_gb_surface_create_arg {
	struct drm_vmw_gb_surface_create_rep rep;
	struct drm_vmw_gb_surface_create_req req;
};
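/*
 * Illustrative sketch (not part of the ABI): creating a guest-backed
 * surface and letting the kernel allocate the backup buffer, assuming a DRM
 * fd, caller-chosen SVGA3d flags/format, SVGA3D_INVALID_ID from the SVGA3D
 * device headers, and libdrm's drmCommandWriteRead():
 *
 *	union drm_vmw_gb_surface_create_arg arg = {0};
 *
 *	arg.req.svga3d_flags = svga3d_flags;	// device surface flags
 *	arg.req.format = format;		// SVGA3d format
 *	arg.req.mip_levels = 1;
 *	arg.req.drm_surface_flags = drm_vmw_surface_flag_create_buffer;
 *	arg.req.buffer_handle = SVGA3D_INVALID_ID;	// no backup buffer given
 *	arg.req.base_size = (struct drm_vmw_size){ .width = 256,
 *						   .height = 256, .depth = 1 };
 *	if (drmCommandWriteRead(fd, DRM_VMW_GB_SURFACE_CREATE,
 *				&arg, sizeof(arg)) == 0) {
 *		// arg.rep.handle and arg.rep.buffer_handle are now valid
 *	}
 */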
/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_rep
 *
 * @creq: The data used as input when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_req"
 * @crep: Additional data output when the surface was created, as described
 * above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output argument to the DRM_VMW_GB_SURFACE_REF ioctl.
 */
struct drm_vmw_gb_surface_ref_rep {
	struct drm_vmw_gb_surface_create_req creq;
	struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
union drm_vmw_gb_surface_reference_arg {
	struct drm_vmw_gb_surface_ref_rep rep;
	struct drm_vmw_surface_arg req;
};


/*************************************************************************/
/**
 * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
 *
 * Idles any previously submitted GPU operations on the buffer and
 * by default blocks command submissions that reference the buffer.
 * If the file descriptor used to grab a blocking CPU sync is closed, the
 * cpu sync is released.
 * The flags argument indicates how the grab / release operation should be
 * performed:
 */

/**
 * enum drm_vmw_synccpu_flags - Synccpu flags:
 *
 * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
 * hint to the kernel to allow command submissions that reference the buffer
 * for read-only.
 * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
 * referencing this buffer.
 * @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return
 * -EBUSY should the buffer be busy.
 * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
 * while the buffer is synced for CPU. This is similar to the GEM bo idle
 * behavior.
 */
enum drm_vmw_synccpu_flags {
	drm_vmw_synccpu_read = (1 << 0),
	drm_vmw_synccpu_write = (1 << 1),
	drm_vmw_synccpu_dontblock = (1 << 2),
	drm_vmw_synccpu_allow_cs = (1 << 3)
};

/**
 * enum drm_vmw_synccpu_op - Synccpu operations:
 *
 * @drm_vmw_synccpu_grab: Grab the buffer for CPU operations.
 * @drm_vmw_synccpu_release: Release a previous grab.
 */
enum drm_vmw_synccpu_op {
	drm_vmw_synccpu_grab,
	drm_vmw_synccpu_release
};

/**
 * struct drm_vmw_synccpu_arg
 *
 * @op: The synccpu operation as described above.
 * @handle: Handle identifying the buffer object.
 * @flags: Flags as described above.
 *
 * Argument to the DRM_VMW_SYNCCPU Ioctl.
 */
struct drm_vmw_synccpu_arg {
	enum drm_vmw_synccpu_op op;
	enum drm_vmw_synccpu_flags flags;
	__u32 handle;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */
enum drm_vmw_extended_context {
	drm_vmw_context_legacy,
	drm_vmw_context_dx
};

/**
 * union drm_vmw_extended_context_arg
 *
 * @req: Context type.
 * @rep: Context identifier.
 *
 * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
 */
union drm_vmw_extended_context_arg {
	enum drm_vmw_extended_context req;
	struct drm_vmw_context_arg rep;
};

#endif /* __VMWGFX_DRM_H__ */