/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef __VMWGFX_DRM_H__
#define __VMWGFX_DRM_H__

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_VMW_MAX_SURFACE_FACES 6
#define DRM_VMW_MAX_MIP_LEVELS 24


#define DRM_VMW_GET_PARAM            0
#define DRM_VMW_ALLOC_DMABUF         1
#define DRM_VMW_ALLOC_BO             1
#define DRM_VMW_UNREF_DMABUF         2
#define DRM_VMW_HANDLE_CLOSE         2
#define DRM_VMW_CURSOR_BYPASS        3
/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0 */
#define DRM_VMW_CONTROL_STREAM       4
#define DRM_VMW_CLAIM_STREAM         5
#define DRM_VMW_UNREF_STREAM         6
/* guarded by DRM_VMW_PARAM_3D == 1 */
#define DRM_VMW_CREATE_CONTEXT       7
#define DRM_VMW_UNREF_CONTEXT        8
#define DRM_VMW_CREATE_SURFACE       9
#define DRM_VMW_UNREF_SURFACE        10
#define DRM_VMW_REF_SURFACE          11
#define DRM_VMW_EXECBUF              12
#define DRM_VMW_GET_3D_CAP           13
#define DRM_VMW_FENCE_WAIT           14
#define DRM_VMW_FENCE_SIGNALED       15
#define DRM_VMW_FENCE_UNREF          16
#define DRM_VMW_FENCE_EVENT          17
#define DRM_VMW_PRESENT              18
#define DRM_VMW_PRESENT_READBACK     19
#define DRM_VMW_UPDATE_LAYOUT        20
#define DRM_VMW_CREATE_SHADER        21
#define DRM_VMW_UNREF_SHADER         22
#define DRM_VMW_GB_SURFACE_CREATE    23
#define DRM_VMW_GB_SURFACE_REF       24
#define DRM_VMW_SYNCCPU              25
#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
#define DRM_VMW_GB_SURFACE_CREATE_EXT   27
#define DRM_VMW_GB_SURFACE_REF_EXT      28
#define DRM_VMW_MSG                     29

/*************************************************************************/
/**
 * DRM_VMW_GET_PARAM - get device information.
 *
 * DRM_VMW_PARAM_FIFO_OFFSET:
 * Offset to use to map the first page of the FIFO read-only.
 * The fifo is mapped using the mmap() system call on the drm device.
 *
 * DRM_VMW_PARAM_OVERLAY_IOCTL:
 * Whether the driver supports the overlay ioctl.
 *
 * DRM_VMW_PARAM_SM4_1:
 * Whether SM4_1 support is enabled.
 */

#define DRM_VMW_PARAM_NUM_STREAMS      0
#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
#define DRM_VMW_PARAM_3D               2
#define DRM_VMW_PARAM_HW_CAPS          3
#define DRM_VMW_PARAM_FIFO_CAPS        4
#define DRM_VMW_PARAM_MAX_FB_SIZE      5
#define DRM_VMW_PARAM_FIFO_HW_VERSION  6
#define DRM_VMW_PARAM_MAX_SURF_MEMORY  7
#define DRM_VMW_PARAM_3D_CAPS_SIZE     8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY   9
#define DRM_VMW_PARAM_MAX_MOB_SIZE     10
#define DRM_VMW_PARAM_SCREEN_TARGET    11
#define DRM_VMW_PARAM_DX               12
#define DRM_VMW_PARAM_HW_CAPS2         13
#define DRM_VMW_PARAM_SM4_1            14
#define DRM_VMW_PARAM_SM5              15

/**
 * enum drm_vmw_handle_type - handle type for ref ioctls
 */
enum drm_vmw_handle_type {
	DRM_VMW_HANDLE_LEGACY = 0,
	DRM_VMW_HANDLE_PRIME = 1
};

/**
 * struct drm_vmw_getparam_arg
 *
 * @value: Returned value. Out.
 * @param: Parameter to query. In.
 *
 * Argument to the DRM_VMW_GET_PARAM Ioctl.
 */

struct drm_vmw_getparam_arg {
	__u64 value;
	__u32 param;
	__u32 pad64;
};
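
/*
 * Example (illustrative sketch, not part of this header's ABI): querying
 * a parameter from user-space. drmCommandWriteRead() is libdrm's wrapper
 * for read/write driver ioctls (<xf86drm.h>); "fd" is assumed to be an
 * open vmwgfx DRM file descriptor.
 *
 *	struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_3D };
 *	int ret = drmCommandWriteRead(fd, DRM_VMW_GET_PARAM,
 *				      &arg, sizeof(arg));
 *	int have_3d = (ret == 0 && arg.value != 0);
 */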

/*************************************************************************/
/**
 * DRM_VMW_CREATE_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */

/**
 * struct drm_vmw_context_arg
 *
 * @cid: Device unique context ID.
 *
 * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
 * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
 */

struct drm_vmw_context_arg {
	__s32 cid;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_CONTEXT - Free a host context.
 *
 * Frees a global context id, and queues a destroy host command for the host.
 * Does not wait for host completion. The context ID can be used directly
 * in the command stream and shows up as the same context ID on the host.
 */

/*************************************************************************/
/**
 * DRM_VMW_CREATE_SURFACE - Create a host surface.
 *
 * Allocates a device unique surface id, and queues a create surface command
 * for the host. Does not wait for host completion. The surface ID can be
 * used directly in the command stream and shows up as the same surface
 * ID on the host.
 */

/**
 * struct drm_vmw_surface_create_req
 *
 * @flags: Surface flags as understood by the host.
 * @format: Surface format as understood by the host.
 * @mip_levels: Number of mip levels for each face.
 * An unused face should have 0 encoded.
 * @size_addr: Address of a user-space array of struct drm_vmw_size
 * cast to an __u64 for 32-64 bit compatibility.
 * The size of the array should equal the total number of mipmap levels.
 * @shareable: Boolean whether other clients (as identified by file descriptors)
 * may reference this surface.
 * @scanout: Boolean whether the surface is intended to be used as a
 * scanout.
 *
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 * Output data from the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_create_req {
	__u32 flags;
	__u32 format;
	__u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	__u64 size_addr;
	__s32 shareable;
	__s32 scanout;
};

/**
 * struct drm_vmw_surface_arg
 *
 * @sid: Surface id of created surface or surface to destroy or reference.
 * @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl.
 *
 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
 * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_arg {
	__s32 sid;
	enum drm_vmw_handle_type handle_type;
};

/**
 * struct drm_vmw_size
 *
 * @width: Mip level width.
 * @height: Mip level height.
 * @depth: Mip level depth.
 *
 * Description of a mip level.
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

struct drm_vmw_size {
	__u32 width;
	__u32 height;
	__u32 depth;
	__u32 pad64;
};

/**
 * union drm_vmw_surface_create_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

union drm_vmw_surface_create_arg {
	struct drm_vmw_surface_arg rep;
	struct drm_vmw_surface_create_req req;
};
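
/*
 * Example (sketch, assuming user-space access through libdrm): creating a
 * 64x64 surface with a single mip level on the first face. The "format"
 * value is an SVGA3d format code defined outside this header and is
 * assumed known; "fd" is an open vmwgfx DRM file descriptor.
 *
 *	struct drm_vmw_size size = { .width = 64, .height = 64, .depth = 1 };
 *	union drm_vmw_surface_create_arg arg = { .req = {
 *		.format = format,
 *		.mip_levels = { 1 },
 *		.size_addr = (__u64)(uintptr_t)&size,
 *	} };
 *	__s32 sid = -1;
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_CREATE_SURFACE,
 *				&arg, sizeof(arg)) == 0)
 *		sid = arg.rep.sid;
 */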

/*************************************************************************/
/**
 * DRM_VMW_REF_SURFACE - Reference a host surface.
 *
 * Puts a reference on a host surface with a given sid, as previously
 * returned by the DRM_VMW_CREATE_SURFACE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface ID in the command
 * stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * in the DRM_VMW_CREATE_SURFACE ioctl.
 */

/**
 * union drm_vmw_surface_reference_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

union drm_vmw_surface_reference_arg {
	struct drm_vmw_surface_create_req rep;
	struct drm_vmw_surface_arg req;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
 *
 * Clear a reference previously put on a host surface.
 * When all references are gone, including the one implicitly placed
 * on creation, a destroy surface command will be queued for the host.
 * Does not wait for completion.
 */

/*************************************************************************/
/**
 * DRM_VMW_EXECBUF
 *
 * Submit a command buffer for execution on the host, and return a
 * fence seqno that when signaled, indicates that the command buffer has
 * executed.
 */

/**
 * struct drm_vmw_execbuf_arg
 *
 * @commands: User-space address of a command buffer cast to an __u64.
 * @command_size: Size in bytes of the command buffer.
 * @throttle_us: Sleep until software is less than @throttle_us
 * microseconds ahead of hardware. The driver may round this value
 * to the nearest kernel tick.
 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
 * __u64.
 * @version: Allows expanding the execbuf ioctl parameters without breaking
 * backwards compatibility, since user-space will always tell the kernel
 * which version it uses.
 * @flags: Execbuf flags.
 * @imported_fence_fd: FD for a fence imported from another device.
 *
 * Argument to the DRM_VMW_EXECBUF Ioctl.
 */

#define DRM_VMW_EXECBUF_VERSION 2

#define DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD (1 << 0)
#define DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD (1 << 1)

struct drm_vmw_execbuf_arg {
	__u64 commands;
	__u32 command_size;
	__u32 throttle_us;
	__u64 fence_rep;
	__u32 version;
	__u32 flags;
	__u32 context_handle;
	__s32 imported_fence_fd;
};
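
/*
 * Example (sketch, not part of the ABI): submitting a command buffer and
 * picking up the resulting fence handle. "cmds" and "cmd_len" are assumed
 * to describe a buffer of valid SVGA commands, and SVGA3D_INVALID_ID
 * (from the SVGA3d headers, not this one) is assumed to mark "no DX
 * context". struct drm_vmw_fence_rep is defined below.
 *
 *	struct drm_vmw_fence_rep fence = { 0 };
 *	struct drm_vmw_execbuf_arg arg = {
 *		.commands = (__u64)(uintptr_t)cmds,
 *		.command_size = cmd_len,
 *		.fence_rep = (__u64)(uintptr_t)&fence,
 *		.version = DRM_VMW_EXECBUF_VERSION,
 *		.context_handle = SVGA3D_INVALID_ID,
 *	};
 *
 *	if (drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg)) == 0 &&
 *	    fence.error == 0)
 *		fence_handle = fence.handle;
 */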

/**
 * struct drm_vmw_fence_rep
 *
 * @handle: Fence object handle for fence associated with a command submission.
 * @mask: Fence flags relevant for this fence object.
 * @seqno: Fence sequence number in fifo. A fence object with a lower
 * seqno will signal the EXEC flag before a fence object with a higher
 * seqno. This can be used by user-space to avoid kernel calls to determine
 * whether a fence has signaled the EXEC flag. Note that @seqno will
 * wrap at 32-bit.
 * @passed_seqno: The highest seqno number processed by the hardware
 * so far. This can be used to mark user-space fence objects as signaled, and
 * to determine whether a fence seqno might be stale.
 * @fd: FD associated with the fence, -1 if not exported.
 * @error: This member should've been set to -EFAULT on submission.
 * The following actions should be taken on completion:
 * error == -EFAULT: Fence communication failed. The host is synchronized.
 * Use the last fence id read from the FIFO fence register.
 * error != 0 && error != -EFAULT:
 * Fence submission failed. The host is synchronized. Use the fence_seq member.
 * error == 0: All is OK. The host may not be synchronized.
 * Use the fence_seq member.
 *
 * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
 */

struct drm_vmw_fence_rep {
	__u32 handle;
	__u32 mask;
	__u32 seqno;
	__u32 passed_seqno;
	__s32 fd;
	__s32 error;
};

/*************************************************************************/
/**
 * DRM_VMW_ALLOC_BO
 *
 * Allocate a buffer object that is visible also to the host.
 * NOTE: The buffer is identified by a handle and an offset, which are
 * private to the guest, but usable in the command stream. The guest kernel
 * may translate these and patch up the command stream accordingly. In the
 * future, the offset may be zero at all times, or it may disappear from
 * the interface before it is fixed.
 *
 * The buffer object may stay user-space mapped in the guest at all times,
 * and is thus suitable for sub-allocation.
 *
 * Buffer objects are mapped using the mmap() syscall on the drm device.
 */

/**
 * struct drm_vmw_alloc_bo_req
 *
 * @size: Required minimum size of the buffer.
 *
 * Input data to the DRM_VMW_ALLOC_BO Ioctl.
 */

struct drm_vmw_alloc_bo_req {
	__u32 size;
	__u32 pad64;
};
#define drm_vmw_alloc_dmabuf_req drm_vmw_alloc_bo_req

/**
 * struct drm_vmw_bo_rep
 *
 * @map_handle: Offset to use in the mmap() call used to map the buffer.
 * @handle: Handle unique to this buffer. Used for unreferencing.
 * @cur_gmr_id: GMR id to use in the command stream when this buffer is
 * referenced. See note above.
 * @cur_gmr_offset: Offset to use in the command stream when this buffer is
 * referenced. See note above.
 *
 * Output data from the DRM_VMW_ALLOC_BO Ioctl.
 */

struct drm_vmw_bo_rep {
	__u64 map_handle;
	__u32 handle;
	__u32 cur_gmr_id;
	__u32 cur_gmr_offset;
	__u32 pad64;
};
#define drm_vmw_dmabuf_rep drm_vmw_bo_rep

/**
 * union drm_vmw_alloc_bo_arg
 *
 * @req: Input data as described above.
 * @rep: Output data as described above.
 *
 * Argument to the DRM_VMW_ALLOC_BO Ioctl.
 */

union drm_vmw_alloc_bo_arg {
	struct drm_vmw_alloc_bo_req req;
	struct drm_vmw_bo_rep rep;
};
#define drm_vmw_alloc_dmabuf_arg drm_vmw_alloc_bo_arg
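
/*
 * Example (sketch): allocating a 64 KiB buffer object and mapping it into
 * the process. The mmap() offset is the map_handle returned by the ioctl;
 * "fd" is assumed to be an open vmwgfx DRM file descriptor.
 *
 *	union drm_vmw_alloc_bo_arg arg = { .req = { .size = 65536 } };
 *	void *ptr = MAP_FAILED;
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_ALLOC_BO,
 *				&arg, sizeof(arg)) == 0)
 *		ptr = mmap(NULL, 65536, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, arg.rep.map_handle);
 */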

/*************************************************************************/
/**
 * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
 *
 * This IOCTL controls the overlay units of the svga device.
 * The SVGA overlay units do not work like regular hardware units in
 * that they do not automatically read back the contents of the given dma
 * buffer. Instead they only read back for each call to this ioctl, and
 * at any point between this call being made and a following call that
 * either changes the buffer or disables the stream.
 */

/**
 * struct drm_vmw_rect
 *
 * Defines a rectangle. Used in the overlay ioctl to define
 * source and destination rectangle.
 */

struct drm_vmw_rect {
	__s32 x;
	__s32 y;
	__u32 w;
	__u32 h;
};

/**
 * struct drm_vmw_control_stream_arg
 *
 * @stream_id: Stream to control.
 * @enabled: If false all following arguments are ignored.
 * @handle: Handle to buffer for getting data from.
 * @format: Format of the overlay as understood by the host.
 * @width: Width of the overlay.
 * @height: Height of the overlay.
 * @size: Size of the overlay in bytes.
 * @pitch: Array of pitches, the last two are only used for YUV12 formats.
 * @offset: Offset from start of dma buffer to overlay.
 * @src: Source rect, must be within the defined area above.
 * @dst: Destination rect, x and y may be negative.
 *
 * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
 */

struct drm_vmw_control_stream_arg {
	__u32 stream_id;
	__u32 enabled;

	__u32 flags;
	__u32 color_key;

	__u32 handle;
	__u32 offset;
	__s32 format;
	__u32 size;
	__u32 width;
	__u32 height;
	__u32 pitch[3];

	__u32 pad64;
	struct drm_vmw_rect src;
	struct drm_vmw_rect dst;
};

/*************************************************************************/
/**
 * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
 */

#define DRM_VMW_CURSOR_BYPASS_ALL    (1 << 0)
#define DRM_VMW_CURSOR_BYPASS_FLAGS       (1)

/**
 * struct drm_vmw_cursor_bypass_arg
 *
 * @flags: Flags.
 * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
 * @xpos: X position of cursor.
 * @ypos: Y position of cursor.
 * @xhot: X hotspot.
 * @yhot: Y hotspot.
 *
 * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
 */

struct drm_vmw_cursor_bypass_arg {
	__u32 flags;
	__u32 crtc_id;
	__s32 xpos;
	__s32 ypos;
	__s32 xhot;
	__s32 yhot;
};

/*************************************************************************/
/**
 * DRM_VMW_CLAIM_STREAM - Claim a single stream.
 */

/**
 * struct drm_vmw_stream_arg
 *
 * @stream_id: Device unique stream ID.
 *
 * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
 * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
 */

struct drm_vmw_stream_arg {
	__u32 stream_id;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_STREAM - Unclaim a stream.
 *
 * Return a single stream that was claimed by this process. Also makes
 * sure that the stream has been stopped.
 */

/*************************************************************************/
/**
 * DRM_VMW_GET_3D_CAP
 *
 * Read 3D capabilities from the FIFO.
 */


/**
 * struct drm_vmw_get_3d_cap_arg
 *
 * @buffer: Pointer to a buffer for capability data, cast to an __u64.
 * @max_size: Max size to copy.
 *
 * Input argument to the DRM_VMW_GET_3D_CAP ioctl.
 */

struct drm_vmw_get_3d_cap_arg {
	__u64 buffer;
	__u32 max_size;
	__u32 pad64;
};
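
/*
 * Example (sketch): reading the capability blob. The required buffer size
 * can first be queried through DRM_VMW_GET_PARAM with
 * DRM_VMW_PARAM_3D_CAPS_SIZE; "caps" and "caps_size" below are assumed to
 * come from such a query.
 *
 *	struct drm_vmw_get_3d_cap_arg arg = {
 *		.buffer = (__u64)(uintptr_t)caps,
 *		.max_size = caps_size,
 *	};
 *
 *	ret = drmCommandWrite(fd, DRM_VMW_GET_3D_CAP, &arg, sizeof(arg));
 */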

/*************************************************************************/
/**
 * DRM_VMW_FENCE_WAIT
 *
 * Waits for a fence object to signal. The wait is interruptible, so that
 * signals may be delivered during the interrupt. The wait may timeout,
 * in which case the call returns -EBUSY. If the wait is restarted,
 * that is restarting without resetting @cookie_valid to zero,
 * the timeout is computed from the first call.
 *
 * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
 * on:
 * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command
 * stream have executed.
 * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish
 * commands in the buffer given to the EXECBUF ioctl returning the fence
 * object handle are available to user-space.
 *
 * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
 * fence wait ioctl returns 0, the fence object has been unreferenced after
 * the wait.
 */

#define DRM_VMW_FENCE_FLAG_EXEC   (1 << 0)
#define DRM_VMW_FENCE_FLAG_QUERY  (1 << 1)

#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)

/**
 * struct drm_vmw_fence_wait_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
 * @kernel_cookie: Set to 0 on first call. Left alone on restart.
 * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
 * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
 * before returning.
 * @flags: Fence flags to wait on.
 * @wait_options: Options that control the behaviour of the wait ioctl.
 *
 * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
 */

struct drm_vmw_fence_wait_arg {
	__u32 handle;
	__s32 cookie_valid;
	__u64 kernel_cookie;
	__u64 timeout_us;
	__s32 lazy;
	__s32 flags;
	__s32 wait_options;
	__s32 pad64;
};
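
/*
 * Example (sketch): waiting for a fence to signal execution and dropping
 * the fence reference in the same call. "handle" is assumed to come from
 * the fence_rep of a previous DRM_VMW_EXECBUF submission; the zero
 * initialization takes care of resetting @cookie_valid for the first call.
 *
 *	struct drm_vmw_fence_wait_arg arg = {
 *		.handle = handle,
 *		.timeout_us = 10 * 1000 * 1000,
 *		.lazy = 1,
 *		.flags = DRM_VMW_FENCE_FLAG_EXEC,
 *		.wait_options = DRM_VMW_WAIT_OPTION_UNREF,
 *	};
 *
 *	ret = drmCommandWriteRead(fd, DRM_VMW_FENCE_WAIT, &arg, sizeof(arg));
 */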

/*************************************************************************/
/**
 * DRM_VMW_FENCE_SIGNALED
 *
 * Checks if a fence object is signaled.
 */


/**
 * struct drm_vmw_fence_signaled_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @flags: Fence object flags input to the DRM_VMW_FENCE_SIGNALED ioctl.
 * @signaled: Out: Whether the fence object has signaled the flags in @flags.
 * @passed_seqno: Out: Highest seqno passed so far. Can be used to signal the
 * EXEC flag of user-space fence objects.
 * @signaled_flags: Out: Flags signaled.
 *
 * Input/Output argument to the DRM_VMW_FENCE_SIGNALED ioctl.
 */

struct drm_vmw_fence_signaled_arg {
	 __u32 handle;
	 __u32 flags;
	 __s32 signaled;
	 __u32 passed_seqno;
	 __u32 signaled_flags;
	 __u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FENCE_UNREF
 *
 * Unreferences a fence object, and causes it to be destroyed if there are no
 * other references to it.
 */

/**
 * struct drm_vmw_fence_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 *
 * Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl.
 */

struct drm_vmw_fence_arg {
	 __u32 handle;
	 __u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FENCE_EVENT
 *
 * Queues an event on a fence to be delivered on the drm character device
 * when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
 * Optionally the approximate time when the fence signaled is
 * given by the event.
 */

/*
 * The event type
 */
#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000

struct drm_vmw_event_fence {
	struct drm_event base;
	__u64 user_data;
	__u32 tv_sec;
	__u32 tv_usec;
};

/*
 * Flags that may be given to the command.
 */
/* Request fence signaled time on the event. */
#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)

/**
 * struct drm_vmw_fence_event_arg
 *
 * @fence_rep: Pointer to fence_rep structure cast to __u64 or 0 if
 * the fence is not supposed to be referenced by user-space.
 * @user_data: Info to be delivered with the event.
 * @handle: Attach the event to this fence only.
 * @flags: A set of flags as defined above.
 */
struct drm_vmw_fence_event_arg {
	__u64 fence_rep;
	__u64 user_data;
	__u32 handle;
	__u32 flags;
};
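
/*
 * Example (sketch, assuming the ioctl is input-only as the struct layout
 * suggests): requesting a timestamped event for a fence and reading it
 * back off the DRM file descriptor. "fence_handle" and "my_cookie" are
 * hypothetical; events arrive as struct drm_vmw_event_fence records
 * (defined above) via read(2) on the same fd.
 *
 *	struct drm_vmw_fence_event_arg arg = {
 *		.handle = fence_handle,
 *		.flags = DRM_VMW_FE_FLAG_REQ_TIME,
 *		.user_data = my_cookie,
 *	};
 *	char buf[sizeof(struct drm_vmw_event_fence)];
 *
 *	if (drmCommandWrite(fd, DRM_VMW_FENCE_EVENT, &arg, sizeof(arg)) == 0)
 *		read(fd, buf, sizeof(buf));
 */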

/*************************************************************************/
/**
 * DRM_VMW_PRESENT
 *
 * Executes an SVGA present on a given fb for a given surface. The surface
 * is placed on the framebuffer. Cliprects are given relative to the given
 * point (the point designated by dest_{x|y}).
 */

/**
 * struct drm_vmw_present_arg
 * @fb_id: Framebuffer id to present / read back from.
 * @sid: Surface id to present from.
 * @dest_x: X placement coordinate for surface.
 * @dest_y: Y placement coordinate for surface.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @num_clips: Number of cliprects given relative to the framebuffer origin,
 * in the same coordinate space as the frame buffer.
 * @pad64: Unused 64-bit padding.
 *
 * Input argument to the DRM_VMW_PRESENT ioctl.
 */

struct drm_vmw_present_arg {
	__u32 fb_id;
	__u32 sid;
	__s32 dest_x;
	__s32 dest_y;
	__u64 clips_ptr;
	__u32 num_clips;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_PRESENT_READBACK
 *
 * Executes an SVGA present readback from a given fb to the dma buffer
 * currently bound as the fb. If there is no dma buffer bound to the fb,
 * an error will be returned.
 */


/**
 * struct drm_vmw_present_readback_arg
 * @fb_id: fb_id to present / read back from.
 * @num_clips: Number of cliprects.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an __u64.
 * If this member is NULL, then the ioctl should not return a fence.
 */

struct drm_vmw_present_readback_arg {
	 __u32 fb_id;
	 __u32 num_clips;
	 __u64 clips_ptr;
	 __u64 fence_rep;
};

/*************************************************************************/
/**
 * DRM_VMW_UPDATE_LAYOUT - Update layout
 *
 * Updates the preferred modes and connection status for connectors. The
 * command consists of one drm_vmw_update_layout_arg pointing to an array
 * of num_outputs drm_vmw_rect's.
 */

/**
 * struct drm_vmw_update_layout_arg
 *
 * @num_outputs: Number of active connectors.
 * @rects: Pointer to array of drm_vmw_rect cast to an __u64.
 *
 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
 */
struct drm_vmw_update_layout_arg {
	__u32 num_outputs;
	__u32 pad64;
	__u64 rects;
};
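
/*
 * Example (sketch): declaring two side-by-side 1024x768 outputs. The
 * geometry below is purely illustrative.
 *
 *	struct drm_vmw_rect rects[2] = {
 *		{ .x = 0,    .y = 0, .w = 1024, .h = 768 },
 *		{ .x = 1024, .y = 0, .w = 1024, .h = 768 },
 *	};
 *	struct drm_vmw_update_layout_arg arg = {
 *		.num_outputs = 2,
 *		.rects = (__u64)(uintptr_t)rects,
 *	};
 *
 *	ret = drmCommandWrite(fd, DRM_VMW_UPDATE_LAYOUT, &arg, sizeof(arg));
 */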

/*************************************************************************/
/**
 * DRM_VMW_CREATE_SHADER - Create shader
 *
 * Creates a shader and optionally binds it to a dma buffer containing
 * the shader byte-code.
 */

/**
 * enum drm_vmw_shader_type - Shader types
 */
enum drm_vmw_shader_type {
	drm_vmw_shader_type_vs = 0,
	drm_vmw_shader_type_ps,
};

/**
 * struct drm_vmw_shader_create_arg
 *
 * @shader_type: Shader type of the shader to create.
 * @size: Size of the byte-code in bytes.
 * @buffer_handle: Buffer handle identifying the buffer containing the
 * shader byte-code.
 * @shader_handle: On successful completion contains a handle that
 * can be used to subsequently identify the shader.
 * @offset: Offset in bytes into the buffer given by @buffer_handle,
 * where the shader byte-code starts.
 *
 * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
 */
struct drm_vmw_shader_create_arg {
	enum drm_vmw_shader_type shader_type;
	__u32 size;
	__u32 buffer_handle;
	__u32 shader_handle;
	__u64 offset;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SHADER - Unreferences a shader
 *
 * Destroys a user-space reference to a shader, optionally destroying
 * it.
 */

/**
 * struct drm_vmw_shader_arg
 *
 * @handle: Handle identifying the shader to destroy.
 *
 * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
 */
struct drm_vmw_shader_arg {
	__u32 handle;
	__u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 */

/**
 * enum drm_vmw_surface_flags
 *
 * @drm_vmw_surface_flag_shareable:     Whether the surface is shareable
 * @drm_vmw_surface_flag_scanout:       Whether the surface is a scanout
 *                                      surface.
 * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
 *                                      given.
 * @drm_vmw_surface_flag_coherent:      Back surface with coherent memory.
 */
enum drm_vmw_surface_flags {
	drm_vmw_surface_flag_shareable = (1 << 0),
	drm_vmw_surface_flag_scanout = (1 << 1),
	drm_vmw_surface_flag_create_buffer = (1 << 2),
	drm_vmw_surface_flag_coherent = (1 << 3),
};

/**
 * struct drm_vmw_gb_surface_create_req
 *
 * @svga3d_flags:      SVGA3d surface flags for the device.
 * @format:            SVGA3d format.
 * @mip_levels:        Number of mip levels for all faces.
 * @drm_surface_flags: Flags as described above.
 * @multisample_count: Future use. Set to 0.
 * @autogen_filter:    Future use. Set to 0.
 * @buffer_handle:     Buffer handle of backup buffer. SVGA3D_INVALID_ID
 *                     if none.
 * @base_size:         Size of the base mip level for all faces.
 * @array_size:        Must be zero for non-DX hardware, and if non-zero
 *                     svga3d_flags must have proper bind flags setup.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
struct drm_vmw_gb_surface_create_req {
	__u32 svga3d_flags;
	__u32 format;
	__u32 mip_levels;
	enum drm_vmw_surface_flags drm_surface_flags;
	__u32 multisample_count;
	__u32 autogen_filter;
	__u32 buffer_handle;
	__u32 array_size;
	struct drm_vmw_size base_size;
};

/**
 * struct drm_vmw_gb_surface_create_rep
 *
 * @handle:            Surface handle.
 * @backup_size:       Size of backup buffers for this surface.
 * @buffer_handle:     Handle of backup buffer. SVGA3D_INVALID_ID if none.
 * @buffer_size:       Actual size of the buffer identified by
 *                     @buffer_handle.
 * @buffer_map_handle: Offset into device address space for the buffer
 *                     identified by @buffer_handle.
 *
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
struct drm_vmw_gb_surface_create_rep {
	__u32 handle;
	__u32 backup_size;
	__u32 buffer_handle;
	__u32 buffer_size;
	__u64 buffer_map_handle;
};

/**
 * union drm_vmw_gb_surface_create_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
union drm_vmw_gb_surface_create_arg {
	struct drm_vmw_gb_surface_create_rep rep;
	struct drm_vmw_gb_surface_create_req req;
};
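
/*
 * Example (sketch): creating a guest-backed surface and asking the kernel
 * to allocate the backup buffer. "format" is an SVGA3d format code and
 * SVGA3D_INVALID_ID comes from the SVGA3d headers; both are assumptions
 * outside this header.
 *
 *	union drm_vmw_gb_surface_create_arg arg = { .req = {
 *		.format = format,
 *		.mip_levels = 1,
 *		.drm_surface_flags = drm_vmw_surface_flag_create_buffer,
 *		.buffer_handle = SVGA3D_INVALID_ID,
 *		.base_size = { .width = 256, .height = 256, .depth = 1 },
 *	} };
 *
 *	if (drmCommandWriteRead(fd, DRM_VMW_GB_SURFACE_CREATE,
 *				&arg, sizeof(arg)) == 0)
 *		handle = arg.rep.handle;
 */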

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_rep
 *
 * @creq: The data used as input when the surface was created, as described
 *        above at "struct drm_vmw_gb_surface_create_req"
 * @crep: Additional data output when the surface was created, as described
 *        above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output argument to the DRM_VMW_GB_SURFACE_REF ioctl.
 */
struct drm_vmw_gb_surface_ref_rep {
	struct drm_vmw_gb_surface_create_req creq;
	struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
union drm_vmw_gb_surface_reference_arg {
	struct drm_vmw_gb_surface_ref_rep rep;
	struct drm_vmw_surface_arg req;
};

/*************************************************************************/
/**
 * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
 *
 * Idles any previously submitted GPU operations on the buffer and
 * by default blocks command submissions that reference the buffer.
 * If the file descriptor used to grab a blocking CPU sync is closed, the
 * cpu sync is released.
 * The flags argument indicates how the grab / release operation should be
 * performed:
 */

/**
 * enum drm_vmw_synccpu_flags - Synccpu flags:
 *
 * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
 * hint to the kernel to allow command submissions that reference the buffer
 * read-only.
 * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
 * referencing this buffer.
 * @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return
 * -EBUSY should the buffer be busy.
 * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
 * while the buffer is synced for CPU. This is similar to the GEM bo idle
 * behavior.
 */
enum drm_vmw_synccpu_flags {
	drm_vmw_synccpu_read = (1 << 0),
	drm_vmw_synccpu_write = (1 << 1),
	drm_vmw_synccpu_dontblock = (1 << 2),
	drm_vmw_synccpu_allow_cs = (1 << 3)
};

/**
 * enum drm_vmw_synccpu_op - Synccpu operations:
 *
 * @drm_vmw_synccpu_grab:    Grab the buffer for CPU operations
 * @drm_vmw_synccpu_release: Release a previous grab.
 */
enum drm_vmw_synccpu_op {
	drm_vmw_synccpu_grab,
	drm_vmw_synccpu_release
};

/**
 * struct drm_vmw_synccpu_arg
 *
 * @op:			     The synccpu operation as described above.
 * @handle:		     Handle identifying the buffer object.
 * @flags:		     Flags as described above.
 */
struct drm_vmw_synccpu_arg {
	enum drm_vmw_synccpu_op op;
	enum drm_vmw_synccpu_flags flags;
	__u32 handle;
	__u32 pad64;
};
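
/*
 * Example (sketch): bracketing CPU reads of a previously mapped buffer
 * object between a grab and a release. "bo_handle" and the access in
 * between are assumptions for illustration.
 *
 *	struct drm_vmw_synccpu_arg arg = {
 *		.op = drm_vmw_synccpu_grab,
 *		.flags = drm_vmw_synccpu_read,
 *		.handle = bo_handle,
 *	};
 *
 *	if (drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg)) == 0) {
 *		read_from_mapping();
 *		arg.op = drm_vmw_synccpu_release;
 *		drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 *	}
 */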

/*************************************************************************/
/**
 * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */
enum drm_vmw_extended_context {
	drm_vmw_context_legacy,
	drm_vmw_context_dx
};

/**
 * union drm_vmw_extended_context_arg
 *
 * @req: Context type.
 * @rep: Context identifier.
 *
 * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
 */
union drm_vmw_extended_context_arg {
	enum drm_vmw_extended_context req;
	struct drm_vmw_context_arg rep;
};

/*************************************************************************/
/*
 * DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its
 * underlying resource.
 *
 * Note that this ioctl is overlaid on the deprecated DRM_VMW_UNREF_DMABUF
 * Ioctl.
 */

/**
 * struct drm_vmw_handle_close_arg
 *
 * @handle: Handle to close.
 *
 * Argument to the DRM_VMW_HANDLE_CLOSE Ioctl.
 */
struct drm_vmw_handle_close_arg {
	__u32 handle;
	__u32 pad64;
};
#define drm_vmw_unref_dmabuf_arg drm_vmw_handle_close_arg
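
/*
 * Example (sketch): releasing a buffer object handle obtained from
 * DRM_VMW_ALLOC_BO once all mappings of the buffer are gone.
 *
 *	struct drm_vmw_handle_close_arg arg = { .handle = bo_handle };
 *
 *	ret = drmCommandWrite(fd, DRM_VMW_HANDLE_CLOSE, &arg, sizeof(arg));
 */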

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE_EXT - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 *
 * This new command extends DRM_VMW_GB_SURFACE_CREATE by adding a version
 * parameter and 64-bit svga flags.
 */

/**
 * enum drm_vmw_surface_version
 *
 * @drm_vmw_gb_surface_v1: Corresponds to current gb surface format with
 * svga3d surface flags split into 2, upper half and lower half.
 */
enum drm_vmw_surface_version {
	drm_vmw_gb_surface_v1
};

/**
 * struct drm_vmw_gb_surface_create_ext_req
 *
 * @base: Surface create parameters.
 * @version: Version of surface create ioctl.
 * @svga3d_flags_upper_32_bits: Upper 32 bits of svga3d flags.
 * @multisample_pattern: Multisampling pattern when msaa is supported.
 * @quality_level: Precision settings for each sample.
 * @buffer_byte_stride: Buffer byte stride.
 * @must_be_zero: Reserved for future usage.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE_EXT Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
 */
struct drm_vmw_gb_surface_create_ext_req {
	struct drm_vmw_gb_surface_create_req base;
	enum drm_vmw_surface_version version;
	__u32 svga3d_flags_upper_32_bits;
	__u32 multisample_pattern;
	__u32 quality_level;
	__u32 buffer_byte_stride;
	__u32 must_be_zero;
};

/**
 * union drm_vmw_gb_surface_create_ext_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 */
union drm_vmw_gb_surface_create_ext_arg {
	struct drm_vmw_gb_surface_create_rep rep;
	struct drm_vmw_gb_surface_create_ext_req req;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF_EXT - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_ext_rep
 *
 * @creq: The data used as input when the surface was created, as described
 *        above at "struct drm_vmw_gb_surface_create_ext_req"
 * @crep: Additional data output when the surface was created, as described
 *        above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output argument to the DRM_VMW_GB_SURFACE_REF_EXT ioctl.
 */
struct drm_vmw_gb_surface_ref_ext_rep {
	struct drm_vmw_gb_surface_create_ext_req creq;
	struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_ext_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at
 *       "struct drm_vmw_gb_surface_ref_ext_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
 */
union drm_vmw_gb_surface_reference_ext_arg {
	struct drm_vmw_gb_surface_ref_ext_rep rep;
	struct drm_vmw_surface_arg req;
};

/**
 * struct drm_vmw_msg_arg
 *
 * @send: Pointer to user-space msg string (null terminated).
 * @receive: Pointer to user-space receive buffer.
 * @send_only: Boolean whether this is only sending or receiving too.
 *
 * Argument to the DRM_VMW_MSG ioctl.
 */
struct drm_vmw_msg_arg {
	__u64 send;
	__u64 receive;
	__s32 send_only;
	__u32 receive_len;
};
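
/*
 * Example (sketch): sending a message to the host without waiting for a
 * reply. The message syntax is host-defined; "log <text>" is assumed here
 * to be the conventional guest logging command.
 *
 *	const char msg[] = "log hello from the guest";
 *	struct drm_vmw_msg_arg arg = {
 *		.send = (__u64)(uintptr_t)msg,
 *		.send_only = 1,
 *	};
 *
 *	ret = drmCommandWriteRead(fd, DRM_VMW_MSG, &arg, sizeof(arg));
 */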

#if defined(__cplusplus)
}
#endif

#endif