• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /**********************************************************
2  * Copyright 2009-2015 VMware, Inc.  All rights reserved.
3  *
4  * Permission is hereby granted, free of charge, to any person
5  * obtaining a copy of this software and associated documentation
6  * files (the "Software"), to deal in the Software without
7  * restriction, including without limitation the rights to use, copy,
8  * modify, merge, publish, distribute, sublicense, and/or sell copies
9  * of the Software, and to permit persons to whom the Software is
10  * furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be
13  * included in all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  *
24  **********************************************************/
25 
26 /**
27  * @file
28  *
 * Wrappers for DRM ioctl functionality used by the rest of the vmw
30  * drm winsys.
31  *
32  * Based on svgaicd_escape.c
33  */
34 
35 
36 #include "svga_cmd.h"
37 #include "util/u_memory.h"
38 #include "util/u_math.h"
39 #include "svgadump/svga_dump.h"
40 #include "frontend/drm_driver.h"
41 #include "vmw_screen.h"
42 #include "vmw_context.h"
43 #include "vmw_fence.h"
44 #include "xf86drm.h"
45 #include "vmwgfx_drm.h"
46 #include "svga3d_caps.h"
47 #include "svga3d_reg.h"
48 
49 #include "util/os_mman.h"
50 
51 #include <errno.h>
52 #include <unistd.h>
53 
#define VMW_MAX_DEFAULT_TEXTURE_SIZE   (128 * 1024 * 1024)
#define VMW_FENCE_TIMEOUT_SECONDS 3600UL

/* Pack/unpack helpers for the 64-bit SVGA3D surface flags, which the DRM
 * interface transports as two separate 32-bit halves. Arguments are fully
 * parenthesized so the macros stay correct for compound expressions.
 */
#define SVGA3D_FLAGS_64(upper32, lower32) \
   (((uint64_t)(upper32) << 32) | (lower32))
#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) ((svga3d_flags) >> 32)
#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
   ((svga3d_flags) & ((uint64_t)UINT32_MAX))
61 
/**
 * A kernel buffer object allocated through the vmwgfx DRM interface,
 * together with its CPU-mapping state.
 */
struct vmw_region
{
   uint32_t handle;     /* Kernel buffer-object handle (DRM_VMW_ALLOC_DMABUF). */
   uint64_t map_handle; /* Offset to pass to mmap() to map the buffer. */
   void *data;          /* CPU virtual address when mapped, NULL otherwise. */
   uint32_t map_count;  /* Number of outstanding vmw_ioctl_region_map() calls. */
   int drm_fd;          /* File descriptor of the owning DRM device. */
   uint32_t size;       /* Size of the buffer object in bytes. */
};
71 
/**
 * vmw_region_size - Return the size, in bytes, of a region's buffer object.
 */
uint32_t
vmw_region_size(struct vmw_region *region)
{
   return region->size;
}
77 
/* The BSDs do not expose ERESTART to userspace; map it to EINTR so the
 * ioctl retry loops below compile and behave sensibly there.
 */
#if defined(__DragonFly__) || defined(__FreeBSD__) || \
    defined(__NetBSD__) || defined(__OpenBSD__)
#define ERESTART EINTR
#endif
82 
83 uint32
vmw_ioctl_context_create(struct vmw_winsys_screen * vws)84 vmw_ioctl_context_create(struct vmw_winsys_screen *vws)
85 {
86    struct drm_vmw_context_arg c_arg;
87    int ret;
88 
89    VMW_FUNC;
90 
91    ret = drmCommandRead(vws->ioctl.drm_fd, DRM_VMW_CREATE_CONTEXT,
92 			&c_arg, sizeof(c_arg));
93 
94    if (ret)
95       return -1;
96 
97    vmw_printf("Context id is %d\n", c_arg.cid);
98    return c_arg.cid;
99 }
100 
101 uint32
vmw_ioctl_extended_context_create(struct vmw_winsys_screen * vws,bool vgpu10)102 vmw_ioctl_extended_context_create(struct vmw_winsys_screen *vws,
103                                   bool vgpu10)
104 {
105    union drm_vmw_extended_context_arg c_arg;
106    int ret;
107 
108    VMW_FUNC;
109    memset(&c_arg, 0, sizeof(c_arg));
110    c_arg.req = (vgpu10 ? drm_vmw_context_dx : drm_vmw_context_legacy);
111    ret = drmCommandWriteRead(vws->ioctl.drm_fd,
112                              DRM_VMW_CREATE_EXTENDED_CONTEXT,
113                              &c_arg, sizeof(c_arg));
114 
115    if (ret)
116       return -1;
117 
118    vmw_printf("Context id is %d\n", c_arg.cid);
119    return c_arg.rep.cid;
120 }
121 
122 void
vmw_ioctl_context_destroy(struct vmw_winsys_screen * vws,uint32 cid)123 vmw_ioctl_context_destroy(struct vmw_winsys_screen *vws, uint32 cid)
124 {
125    struct drm_vmw_context_arg c_arg;
126 
127    VMW_FUNC;
128 
129    memset(&c_arg, 0, sizeof(c_arg));
130    c_arg.cid = cid;
131 
132    (void)drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_UNREF_CONTEXT,
133 			 &c_arg, sizeof(c_arg));
134 
135 }
136 
/**
 * vmw_ioctl_surface_create - Create a legacy (non guest-backed) surface.
 *
 * @vws: Winsys screen.
 * @flags: SVGA3D surface flags for the new surface.
 * @format: SVGA3D format of the new surface.
 * @usage: SVGA_SURFACE_USAGE_* bits; only SCANOUT is consumed here.
 * @size: Dimensions of the base mip level.
 * @numFaces: Number of faces (e.g. 6 for cube maps).
 * @numMipLevels: Number of mip levels per face.
 * @sampleCount: Unused by this legacy path.
 *
 * Returns the kernel surface id on success, (uint32)-1 on failure.
 */
uint32
vmw_ioctl_surface_create(struct vmw_winsys_screen *vws,
                         SVGA3dSurface1Flags flags,
                         SVGA3dSurfaceFormat format,
                         unsigned usage,
                         SVGA3dSize size,
                         uint32_t numFaces, uint32_t numMipLevels,
                         unsigned sampleCount)
{
   union drm_vmw_surface_create_arg s_arg;
   struct drm_vmw_surface_create_req *req = &s_arg.req;
   struct drm_vmw_surface_arg *rep = &s_arg.rep;
   /* One size entry per (face, mip level) pair, filled face-major below. */
   struct drm_vmw_size sizes[DRM_VMW_MAX_SURFACE_FACES*
			     DRM_VMW_MAX_MIP_LEVELS];
   struct drm_vmw_size *cur_size;
   uint32_t iFace;
   uint32_t iMipLevel;
   int ret;

   vmw_printf("%s flags %d format %d\n", __func__, flags, format);

   memset(&s_arg, 0, sizeof(s_arg));
   req->flags = (uint32_t) flags;
   req->scanout = !!(usage & SVGA_SURFACE_USAGE_SCANOUT);
   req->format = (uint32_t) format;
   req->shareable = true;

   assert(numFaces * numMipLevels < DRM_VMW_MAX_SURFACE_FACES*
	  DRM_VMW_MAX_MIP_LEVELS);
   cur_size = sizes;
   for (iFace = 0; iFace < numFaces; ++iFace) {
      SVGA3dSize mipSize = size;

      req->mip_levels[iFace] = numMipLevels;
      /* Record each mip level, halving dimensions with a floor of 1. */
      for (iMipLevel = 0; iMipLevel < numMipLevels; ++iMipLevel) {
	 cur_size->width = mipSize.width;
	 cur_size->height = mipSize.height;
	 cur_size->depth = mipSize.depth;
	 mipSize.width = MAX2(mipSize.width >> 1, 1);
	 mipSize.height = MAX2(mipSize.height >> 1, 1);
	 mipSize.depth = MAX2(mipSize.depth >> 1, 1);
	 cur_size++;
      }
   }
   /* Unused faces advertise zero mip levels. */
   for (iFace = numFaces; iFace < SVGA3D_MAX_SURFACE_FACES; ++iFace) {
      req->mip_levels[iFace] = 0;
   }

   /* The kernel reads the per-mip sizes through this user-space pointer. */
   req->size_addr = (unsigned long)&sizes;

   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_CREATE_SURFACE,
			     &s_arg, sizeof(s_arg));

   if (ret)
      return -1;

   vmw_printf("Surface id is %d\n", rep->sid);

   return rep->sid;
}
197 
198 
/**
 * vmw_ioctl_gb_surface_create - Create a guest-backed surface, optionally
 * together with its backing buffer object.
 *
 * @vws: Winsys screen.
 * @flags: 64-bit SVGA3D surface flags.
 * @format: SVGA3D format of the new surface.
 * @usage: SVGA_SURFACE_USAGE_* bits (SCANOUT and COHERENT are consumed).
 * @size: Dimensions of the base mip level.
 * @numFaces: Used as the array size on vgpu10 devices.
 * @numMipLevels: Number of mip levels.
 * @sampleCount: Multisample count on vgpu10 devices.
 * @buffer_handle: Existing backing-buffer handle, or 0 to have the kernel
 * create one.
 * @multisamplePattern: MS pattern (DRM 2.15+ path only).
 * @qualityLevel: MS quality level (DRM 2.15+ path only).
 * @p_region: If non-NULL, receives a newly allocated struct vmw_region
 * describing the backing buffer; the caller owns and must destroy it.
 *
 * Returns the kernel surface handle on success, SVGA3D_INVALID_ID on
 * failure.
 */
uint32
vmw_ioctl_gb_surface_create(struct vmw_winsys_screen *vws,
                            SVGA3dSurfaceAllFlags flags,
                            SVGA3dSurfaceFormat format,
                            unsigned usage,
                            SVGA3dSize size,
                            uint32_t numFaces,
                            uint32_t numMipLevels,
                            unsigned sampleCount,
                            uint32_t buffer_handle,
                            SVGA3dMSPattern multisamplePattern,
                            SVGA3dMSQualityLevel qualityLevel,
                            struct vmw_region **p_region)
{
   /* The extended and base create ioctls share storage; only one is used. */
   union {
      union drm_vmw_gb_surface_create_ext_arg ext_arg;
      union drm_vmw_gb_surface_create_arg arg;
   } s_arg;
   struct drm_vmw_gb_surface_create_rep *rep;
   struct vmw_region *region = NULL;
   int ret;

   vmw_printf("%s flags %d format %d\n", __func__, flags, format);

   /* Allocate the region wrapper up front so a late failure can unwind. */
   if (p_region) {
      region = CALLOC_STRUCT(vmw_region);
      if (!region)
         return SVGA3D_INVALID_ID;
   }

   memset(&s_arg, 0, sizeof(s_arg));

   if (vws->ioctl.have_drm_2_15) {
      /* Extended path: full 64-bit flags plus multisample parameters. */
      struct drm_vmw_gb_surface_create_ext_req *req = &s_arg.ext_arg.req;
      rep = &s_arg.ext_arg.rep;

      req->version = drm_vmw_gb_surface_v1;
      req->multisample_pattern = multisamplePattern;
      req->quality_level = qualityLevel;
      req->buffer_byte_stride = 0;
      req->must_be_zero = 0;
      /* The 64-bit flags travel as two 32-bit halves. */
      req->base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(flags);
      req->svga3d_flags_upper_32_bits = SVGA3D_FLAGS_UPPER_32(flags);
      req->base.format = (uint32_t) format;

      if (usage & SVGA_SURFACE_USAGE_SCANOUT)
         req->base.drm_surface_flags |= drm_vmw_surface_flag_scanout;

      if ((usage & SVGA_SURFACE_USAGE_COHERENT) || vws->force_coherent)
         req->base.drm_surface_flags |= drm_vmw_surface_flag_coherent;

      req->base.drm_surface_flags |= drm_vmw_surface_flag_shareable;
      req->base.drm_surface_flags |= drm_vmw_surface_flag_create_buffer;
      req->base.base_size.width = size.width;
      req->base.base_size.height = size.height;
      req->base.base_size.depth = size.depth;
      req->base.mip_levels = numMipLevels;
      req->base.multisample_count = 0;
      req->base.autogen_filter = SVGA3D_TEX_FILTER_NONE;

      if (vws->base.have_vgpu10) {
         req->base.array_size = numFaces;
         req->base.multisample_count = sampleCount;
      } else {
         assert(numFaces * numMipLevels < DRM_VMW_MAX_SURFACE_FACES*
	        DRM_VMW_MAX_MIP_LEVELS);
         req->base.array_size = 0;
      }

      req->base.buffer_handle = buffer_handle ?
         buffer_handle : SVGA3D_INVALID_ID;

      ret = drmCommandWriteRead(vws->ioctl.drm_fd,
                                DRM_VMW_GB_SURFACE_CREATE_EXT, &s_arg.ext_arg,
                                sizeof(s_arg.ext_arg));

      if (ret)
         goto out_fail_create;
   } else {
      /* Pre-2.15 path: 32-bit flags, no multisample parameters. */
      struct drm_vmw_gb_surface_create_req *req = &s_arg.arg.req;
      rep = &s_arg.arg.rep;

      req->svga3d_flags = (uint32_t) flags;
      req->format = (uint32_t) format;

      if (usage & SVGA_SURFACE_USAGE_SCANOUT)
         req->drm_surface_flags |= drm_vmw_surface_flag_scanout;

      req->drm_surface_flags |= drm_vmw_surface_flag_shareable;

      req->drm_surface_flags |= drm_vmw_surface_flag_create_buffer;
      req->base_size.width = size.width;
      req->base_size.height = size.height;
      req->base_size.depth = size.depth;
      req->mip_levels = numMipLevels;
      req->multisample_count = 0;
      req->autogen_filter = SVGA3D_TEX_FILTER_NONE;

      if (vws->base.have_vgpu10) {
         req->array_size = numFaces;
         req->multisample_count = sampleCount;
      } else {
         assert(numFaces * numMipLevels < DRM_VMW_MAX_SURFACE_FACES*
	        DRM_VMW_MAX_MIP_LEVELS);
         req->array_size = 0;
      }

      req->buffer_handle = buffer_handle ?
         buffer_handle : SVGA3D_INVALID_ID;

      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GB_SURFACE_CREATE,
			        &s_arg.arg, sizeof(s_arg.arg));

      if (ret)
         goto out_fail_create;
   }

   /* Both reply layouts expose the same drm_vmw_gb_surface_create_rep. */
   if (p_region) {
      region->handle = rep->buffer_handle;
      region->map_handle = rep->buffer_map_handle;
      region->drm_fd = vws->ioctl.drm_fd;
      region->size = rep->backup_size;
      *p_region = region;
   }

   vmw_printf("Surface id is %d\n", rep->sid);
   return rep->handle;

out_fail_create:
   FREE(region);
   return SVGA3D_INVALID_ID;
}
331 
332 /**
333  * vmw_ioctl_surface_req - Fill in a struct surface_req
334  *
335  * @vws: Winsys screen
336  * @whandle: Surface handle
337  * @req: The struct surface req to fill in
338  * @needs_unref: This call takes a kernel surface reference that needs to
339  * be unreferenced.
340  *
341  * Returns 0 on success, negative error type otherwise.
342  * Fills in the surface_req structure according to handle type and kernel
343  * capabilities.
344  */
345 static int
vmw_ioctl_surface_req(const struct vmw_winsys_screen * vws,const struct winsys_handle * whandle,struct drm_vmw_surface_arg * req,bool * needs_unref)346 vmw_ioctl_surface_req(const struct vmw_winsys_screen *vws,
347                       const struct winsys_handle *whandle,
348                       struct drm_vmw_surface_arg *req,
349                       bool *needs_unref)
350 {
351    int ret;
352 
353    switch(whandle->type) {
354    case WINSYS_HANDLE_TYPE_SHARED:
355    case WINSYS_HANDLE_TYPE_KMS:
356       *needs_unref = false;
357       req->handle_type = DRM_VMW_HANDLE_LEGACY;
358       req->sid = whandle->handle;
359       break;
360    case WINSYS_HANDLE_TYPE_FD:
361       if (!vws->ioctl.have_drm_2_6) {
362          uint32_t handle;
363 
364          ret = drmPrimeFDToHandle(vws->ioctl.drm_fd, whandle->handle, &handle);
365          if (ret) {
366             vmw_error("Failed to get handle from prime fd %d.\n",
367                       (int) whandle->handle);
368             return -EINVAL;
369          }
370 
371          *needs_unref = true;
372          req->handle_type = DRM_VMW_HANDLE_LEGACY;
373          req->sid = handle;
374       } else {
375          *needs_unref = false;
376          req->handle_type = DRM_VMW_HANDLE_PRIME;
377          req->sid = whandle->handle;
378       }
379       break;
380    default:
381       vmw_error("Attempt to import unsupported handle type %d.\n",
382                 whandle->type);
383       return -EINVAL;
384    }
385 
386    return 0;
387 }
388 
389 /**
390  * vmw_ioctl_gb_surface_ref - Put a reference on a guest-backed surface and
391  * get surface information
392  *
393  * @vws: Screen to register the reference on
394  * @handle: Kernel handle of the guest-backed surface
395  * @flags: flags used when the surface was created
396  * @format: Format used when the surface was created
397  * @numMipLevels: Number of mipmap levels of the surface
398  * @p_region: On successful return points to a newly allocated
399  * struct vmw_region holding a reference to the surface backup buffer.
400  *
401  * Returns 0 on success, a system error on failure.
402  */
int
vmw_ioctl_gb_surface_ref(struct vmw_winsys_screen *vws,
                         const struct winsys_handle *whandle,
                         SVGA3dSurfaceAllFlags *flags,
                         SVGA3dSurfaceFormat *format,
                         uint32_t *numMipLevels,
                         uint32_t *handle,
                         struct vmw_region **p_region)
{
   struct vmw_region *region = NULL;
   bool needs_unref = false;
   int ret;

   /* The caller always receives a region describing the backup buffer. */
   assert(p_region != NULL);
   region = CALLOC_STRUCT(vmw_region);
   if (!region)
      return -ENOMEM;

   if (vws->ioctl.have_drm_2_15) {
      /* Extended path: reply carries the full 64-bit surface flags. */
      union drm_vmw_gb_surface_reference_ext_arg s_arg;
      struct drm_vmw_surface_arg *req = &s_arg.req;
      struct drm_vmw_gb_surface_ref_ext_rep *rep = &s_arg.rep;

      memset(&s_arg, 0, sizeof(s_arg));
      /* Translate the winsys handle to a kernel surface reference. */
      ret = vmw_ioctl_surface_req(vws, whandle, req, &needs_unref);
      if (ret)
         goto out_fail_req;

      /* Provisional handle; replaced by the reply's handle on success. */
      *handle = req->sid;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GB_SURFACE_REF_EXT,
			        &s_arg, sizeof(s_arg));

      if (ret)
         goto out_fail_ref;

      region->handle = rep->crep.buffer_handle;
      region->map_handle = rep->crep.buffer_map_handle;
      region->drm_fd = vws->ioctl.drm_fd;
      region->size = rep->crep.backup_size;
      *p_region = region;

      *handle = rep->crep.handle;
      /* Reassemble the 64-bit flags from the split reply fields. */
      *flags = SVGA3D_FLAGS_64(rep->creq.svga3d_flags_upper_32_bits,
                               rep->creq.base.svga3d_flags);
      *format = rep->creq.base.format;
      *numMipLevels = rep->creq.base.mip_levels;
   } else {
      /* Pre-2.15 path: only 32-bit surface flags are available. */
      union drm_vmw_gb_surface_reference_arg s_arg;
      struct drm_vmw_surface_arg *req = &s_arg.req;
      struct drm_vmw_gb_surface_ref_rep *rep = &s_arg.rep;

      memset(&s_arg, 0, sizeof(s_arg));
      ret = vmw_ioctl_surface_req(vws, whandle, req, &needs_unref);
      if (ret)
         goto out_fail_req;

      *handle = req->sid;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GB_SURFACE_REF,
			        &s_arg, sizeof(s_arg));

      if (ret)
         goto out_fail_ref;

      region->handle = rep->crep.buffer_handle;
      region->map_handle = rep->crep.buffer_map_handle;
      region->drm_fd = vws->ioctl.drm_fd;
      region->size = rep->crep.backup_size;
      *p_region = region;

      *handle = rep->crep.handle;
      *flags = rep->creq.svga3d_flags;
      *format = rep->creq.format;
      *numMipLevels = rep->creq.mip_levels;
   }

   vmw_printf("%s flags %d format %d\n", __func__, *flags, *format);

   /* Drop the extra kernel reference taken by the prime-fd conversion. */
   if (needs_unref)
      vmw_ioctl_surface_destroy(vws, *handle);

   return 0;
out_fail_ref:
   if (needs_unref)
      vmw_ioctl_surface_destroy(vws, *handle);
out_fail_req:
   FREE(region);
   return ret;
}
491 
492 void
vmw_ioctl_surface_destroy(struct vmw_winsys_screen * vws,uint32 sid)493 vmw_ioctl_surface_destroy(struct vmw_winsys_screen *vws, uint32 sid)
494 {
495    struct drm_vmw_surface_arg s_arg;
496 
497    VMW_FUNC;
498 
499    memset(&s_arg, 0, sizeof(s_arg));
500    s_arg.sid = sid;
501 
502    (void)drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_UNREF_SURFACE,
503 			 &s_arg, sizeof(s_arg));
504 }
505 
/**
 * vmw_ioctl_command - Submit a command buffer to the device.
 *
 * @vws: Winsys screen.
 * @cid: Context id (only passed to the kernel on vgpu10 devices).
 * @throttle_us: Throttle value forwarded to the kernel.
 * @commands: Pointer to the command buffer.
 * @size: Command buffer size in bytes.
 * @pfence: If non-NULL, receives a fence for the submission (or NULL if
 * the kernel already synced or fence creation failed).
 * @imported_fence_fd: Fence fd to wait on before execution, or -1.
 * @flags: SVGA_HINT_FLAG_* bits controlling fence fd import/export.
 *
 * Aborts the process if the execbuf ioctl itself fails.
 */
void
vmw_ioctl_command(struct vmw_winsys_screen *vws, int32_t cid,
                  uint32_t throttle_us, void *commands, uint32_t size,
                  struct pipe_fence_handle **pfence, int32_t imported_fence_fd,
                  uint32_t flags)
{
   struct drm_vmw_execbuf_arg arg;
   struct drm_vmw_fence_rep rep;
   int ret;
   int argsize;

#ifdef DEBUG
   /* Optional command-stream dumping/skipping, controlled by environment
    * variables read once on the first submission.
    */
   {
      static bool firsttime = true;
      static bool debug = false;
      static bool skip = false;
      if (firsttime) {
         debug = debug_get_bool_option("SVGA_DUMP_CMD", false);
         skip = debug_get_bool_option("SVGA_SKIP_CMD", false);
      }
      if (debug) {
         VMW_FUNC;
         svga_dump_commands(commands, size);
      }
      firsttime = false;
      if (skip) {
         size = 0;
      }
   }
#endif

   memset(&arg, 0, sizeof(arg));
   memset(&rep, 0, sizeof(rep));

   if (flags & SVGA_HINT_FLAG_EXPORT_FENCE_FD) {
      arg.flags |= DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD;
   }

   if (imported_fence_fd != -1) {
      arg.flags |= DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD;
   }

   /* If the kernel never writes the fence rep, this sentinel makes the
    * rep.error check below treat the submission as unfenced.
    */
   rep.error = -EFAULT;
   if (pfence)
      arg.fence_rep = (unsigned long)&rep;
   arg.commands = (unsigned long)commands;
   arg.command_size = size;
   arg.throttle_us = throttle_us;
   arg.version = vws->ioctl.drm_execbuf_version;
   arg.context_handle = (vws->base.have_vgpu10 ? cid : SVGA3D_INVALID_ID);

   /* Older DRM module requires this to be zero */
   if (vws->base.have_fence_fd)
      arg.imported_fence_fd = imported_fence_fd;

   /* In DRM_VMW_EXECBUF_VERSION 1, the drm_vmw_execbuf_arg structure ends with
    * the flags field. The structure size sent to drmCommandWrite must match
    * the drm_execbuf_version. Otherwise, an invalid value will be returned.
    */
   argsize = vws->ioctl.drm_execbuf_version > 1 ? sizeof(arg) :
                offsetof(struct drm_vmw_execbuf_arg, context_handle);
   /* Retry on signal interruption; back off briefly while the device is
    * busy.
    */
   do {
       ret = drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_EXECBUF, &arg, argsize);
       if (ret == -EBUSY)
          usleep(1000);
   } while(ret == -ERESTART || ret == -EBUSY);
   if (ret) {
      vmw_error("%s error %s.\n", __func__, strerror(-ret));
      abort();
   }

   if (rep.error) {

      /*
       * Kernel has already synced, or caller requested no fence.
       */
      if (pfence)
	 *pfence = NULL;
   } else {
      if (pfence) {
         vmw_fences_signal(vws->fence_ops, rep.passed_seqno, rep.seqno,
                           true);

         /* Older DRM module will set this to zero, but -1 is the proper FD
          * to use for no Fence FD support */
         if (!vws->base.have_fence_fd)
            rep.fd = -1;

         *pfence = vmw_fence_create(vws->fence_ops, rep.handle,
                                    rep.seqno, rep.mask, rep.fd);
         if (*pfence == NULL) {
            /*
             * Fence creation failed. Need to sync.
             */
            (void) vmw_ioctl_fence_finish(vws, rep.handle, rep.mask);
            vmw_ioctl_fence_unref(vws, rep.handle);
         }
      }
   }
}
606 
607 
/**
 * vmw_ioctl_region_create - Allocate a kernel buffer object of @size bytes
 * and wrap it in a newly allocated struct vmw_region.
 *
 * Returns the region on success, NULL on allocation or ioctl failure.
 * The caller owns the region and must release it with
 * vmw_ioctl_region_destroy().
 */
struct vmw_region *
vmw_ioctl_region_create(struct vmw_winsys_screen *vws, uint32_t size)
{
   struct vmw_region *region;
   union drm_vmw_alloc_dmabuf_arg arg;
   struct drm_vmw_alloc_dmabuf_req *req = &arg.req;
   struct drm_vmw_dmabuf_rep *rep = &arg.rep;
   int ret;

   vmw_printf("%s: size = %u\n", __func__, size);

   region = CALLOC_STRUCT(vmw_region);
   if (!region)
      goto out_err1;

   memset(&arg, 0, sizeof(arg));
   req->size = size;
   /* Retry if the ioctl was interrupted by a signal. */
   do {
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_ALLOC_DMABUF, &arg,
				sizeof(arg));
   } while (ret == -ERESTART);

   if (ret) {
      vmw_error("IOCTL failed %d: %s\n", ret, strerror(-ret));
      goto out_err1;
   }

   region->data = NULL;
   region->handle = rep->handle;
   region->map_handle = rep->map_handle;
   region->map_count = 0;
   region->size = size;
   region->drm_fd = vws->ioctl.drm_fd;

   /* NOTE(review): struct vmw_region as defined in this file has no 'ptr'
    * member — these debug-print arguments look stale; verify that
    * vmw_printf evaluates its arguments in debug builds.
    */
   vmw_printf("   gmrId = %u, offset = %u\n",
              region->ptr.gmrId, region->ptr.offset);

   return region;

 out_err1:
   FREE(region);
   return NULL;
}
651 
/**
 * vmw_ioctl_region_destroy - Unmap (if mapped) and free a region's kernel
 * buffer object, then free the region wrapper itself.
 */
void
vmw_ioctl_region_destroy(struct vmw_region *region)
{
   struct drm_vmw_unref_dmabuf_arg arg;

   /* NOTE(review): struct vmw_region has no 'ptr' member — stale debug
    * arguments; verify vmw_printf evaluates its arguments in debug builds.
    */
   vmw_printf("%s: gmrId = %u, offset = %u\n", __func__,
              region->ptr.gmrId, region->ptr.offset);

   /* Tear down any remaining CPU mapping before dropping the buffer. */
   if (region->data) {
      os_munmap(region->data, region->size);
      region->data = NULL;
   }

   memset(&arg, 0, sizeof(arg));
   arg.handle = region->handle;
   drmCommandWrite(region->drm_fd, DRM_VMW_UNREF_DMABUF, &arg, sizeof(arg));

   FREE(region);
}
671 
672 SVGAGuestPtr
vmw_ioctl_region_ptr(struct vmw_region * region)673 vmw_ioctl_region_ptr(struct vmw_region *region)
674 {
675    SVGAGuestPtr ptr = {region->handle, 0};
676    return ptr;
677 }
678 
679 void *
vmw_ioctl_region_map(struct vmw_region * region)680 vmw_ioctl_region_map(struct vmw_region *region)
681 {
682    void *map;
683 
684    vmw_printf("%s: gmrId = %u, offset = %u\n", __func__,
685               region->ptr.gmrId, region->ptr.offset);
686 
687    if (region->data == NULL) {
688       map = os_mmap(NULL, region->size, PROT_READ | PROT_WRITE, MAP_SHARED,
689 		 region->drm_fd, region->map_handle);
690       if (map == MAP_FAILED) {
691 	 vmw_error("%s: Map failed.\n", __func__);
692 	 return NULL;
693       }
694 
695 // MADV_HUGEPAGE only exists on Linux
696 #ifdef MADV_HUGEPAGE
697       (void) madvise(map, region->size, MADV_HUGEPAGE);
698 #endif
699       region->data = map;
700    }
701 
702    ++region->map_count;
703 
704    return region->data;
705 }
706 
/**
 * vmw_ioctl_region_unmap - Drop a CPU mapping of a region.
 *
 * NOTE(review): the buffer is munmapped and data cleared even when
 * map_count is still positive after the decrement — verify callers
 * strictly balance map/unmap and do not rely on nested mappings.
 */
void
vmw_ioctl_region_unmap(struct vmw_region *region)
{
   vmw_printf("%s: gmrId = %u, offset = %u\n", __func__,
              region->ptr.gmrId, region->ptr.offset);

   --region->map_count;
   os_munmap(region->data, region->size);
   region->data = NULL;
}
717 
718 /**
719  * vmw_ioctl_syncforcpu - Synchronize a buffer object for CPU usage
720  *
721  * @region: Pointer to a struct vmw_region representing the buffer object.
722  * @dont_block: Dont wait for GPU idle, but rather return -EBUSY if the
723  * GPU is busy with the buffer object.
724  * @readonly: Hint that the CPU access is read-only.
725  * @allow_cs: Allow concurrent command submission while the buffer is
726  * synchronized for CPU. If FALSE command submissions referencing the
727  * buffer will block until a corresponding call to vmw_ioctl_releasefromcpu.
728  *
729  * This function idles any GPU activities touching the buffer and blocks
730  * command submission of commands referencing the buffer, even from
731  * other processes.
732  */
733 int
vmw_ioctl_syncforcpu(struct vmw_region * region,bool dont_block,bool readonly,bool allow_cs)734 vmw_ioctl_syncforcpu(struct vmw_region *region,
735                      bool dont_block,
736                      bool readonly,
737                      bool allow_cs)
738 {
739    struct drm_vmw_synccpu_arg arg;
740 
741    memset(&arg, 0, sizeof(arg));
742    arg.op = drm_vmw_synccpu_grab;
743    arg.handle = region->handle;
744    arg.flags = drm_vmw_synccpu_read;
745    if (!readonly)
746       arg.flags |= drm_vmw_synccpu_write;
747    if (dont_block)
748       arg.flags |= drm_vmw_synccpu_dontblock;
749    if (allow_cs)
750       arg.flags |= drm_vmw_synccpu_allow_cs;
751 
752    return drmCommandWrite(region->drm_fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
753 }
754 
755 /**
756  * vmw_ioctl_releasefromcpu - Undo a previous syncforcpu.
757  *
758  * @region: Pointer to a struct vmw_region representing the buffer object.
759  * @readonly: Should hold the same value as the matching syncforcpu call.
760  * @allow_cs: Should hold the same value as the matching syncforcpu call.
761  */
762 void
vmw_ioctl_releasefromcpu(struct vmw_region * region,bool readonly,bool allow_cs)763 vmw_ioctl_releasefromcpu(struct vmw_region *region,
764                          bool readonly,
765                          bool allow_cs)
766 {
767    struct drm_vmw_synccpu_arg arg;
768 
769    memset(&arg, 0, sizeof(arg));
770    arg.op = drm_vmw_synccpu_release;
771    arg.handle = region->handle;
772    arg.flags = drm_vmw_synccpu_read;
773    if (!readonly)
774       arg.flags |= drm_vmw_synccpu_write;
775    if (allow_cs)
776       arg.flags |= drm_vmw_synccpu_allow_cs;
777 
778    (void) drmCommandWrite(region->drm_fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
779 }
780 
781 void
vmw_ioctl_fence_unref(struct vmw_winsys_screen * vws,uint32_t handle)782 vmw_ioctl_fence_unref(struct vmw_winsys_screen *vws,
783 		      uint32_t handle)
784 {
785    struct drm_vmw_fence_arg arg;
786    int ret;
787 
788    memset(&arg, 0, sizeof(arg));
789    arg.handle = handle;
790 
791    ret = drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_FENCE_UNREF,
792 			 &arg, sizeof(arg));
793    if (ret != 0)
794       vmw_error("%s Failed\n", __func__);
795 }
796 
797 static inline uint32_t
vmw_drm_fence_flags(uint32_t flags)798 vmw_drm_fence_flags(uint32_t flags)
799 {
800     uint32_t dflags = 0;
801 
802     if (flags & SVGA_FENCE_FLAG_EXEC)
803 	dflags |= DRM_VMW_FENCE_FLAG_EXEC;
804     if (flags & SVGA_FENCE_FLAG_QUERY)
805 	dflags |= DRM_VMW_FENCE_FLAG_QUERY;
806 
807     return dflags;
808 }
809 
810 
811 int
vmw_ioctl_fence_signalled(struct vmw_winsys_screen * vws,uint32_t handle,uint32_t flags)812 vmw_ioctl_fence_signalled(struct vmw_winsys_screen *vws,
813 			  uint32_t handle,
814 			  uint32_t flags)
815 {
816    struct drm_vmw_fence_signaled_arg arg;
817    uint32_t vflags = vmw_drm_fence_flags(flags);
818    int ret;
819 
820    memset(&arg, 0, sizeof(arg));
821    arg.handle = handle;
822    arg.flags = vflags;
823 
824    ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_FENCE_SIGNALED,
825 			     &arg, sizeof(arg));
826 
827    if (ret != 0)
828       return ret;
829 
830    vmw_fences_signal(vws->fence_ops, arg.passed_seqno, 0, false);
831 
832    return (arg.signaled) ? 0 : -1;
833 }
834 
835 
836 
/**
 * vmw_ioctl_fence_finish - Wait (up to VMW_FENCE_TIMEOUT_SECONDS) for a
 * fence object to signal.
 *
 * NOTE(review): a failing wait is logged but swallowed; the function
 * always returns 0 — verify callers do not depend on a failure code.
 */
int
vmw_ioctl_fence_finish(struct vmw_winsys_screen *vws,
                       uint32_t handle,
		       uint32_t flags)
{
   struct drm_vmw_fence_wait_arg arg;
   uint32_t vflags = vmw_drm_fence_flags(flags);
   int ret;

   memset(&arg, 0, sizeof(arg));

   arg.handle = handle;
   arg.timeout_us = VMW_FENCE_TIMEOUT_SECONDS*1000000;
   arg.lazy = 0;
   arg.flags = vflags;

   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_FENCE_WAIT,
			     &arg, sizeof(arg));

   if (ret != 0)
      vmw_error("%s Failed\n", __func__);

   return 0;
}
861 
862 uint32
vmw_ioctl_shader_create(struct vmw_winsys_screen * vws,SVGA3dShaderType type,uint32 code_len)863 vmw_ioctl_shader_create(struct vmw_winsys_screen *vws,
864 			SVGA3dShaderType type,
865 			uint32 code_len)
866 {
867    struct drm_vmw_shader_create_arg sh_arg;
868    int ret;
869 
870    VMW_FUNC;
871 
872    memset(&sh_arg, 0, sizeof(sh_arg));
873 
874    sh_arg.size = code_len;
875    sh_arg.buffer_handle = SVGA3D_INVALID_ID;
876    sh_arg.shader_handle = SVGA3D_INVALID_ID;
877    switch (type) {
878    case SVGA3D_SHADERTYPE_VS:
879       sh_arg.shader_type = drm_vmw_shader_type_vs;
880       break;
881    case SVGA3D_SHADERTYPE_PS:
882       sh_arg.shader_type = drm_vmw_shader_type_ps;
883       break;
884    default:
885       assert(!"Invalid shader type.");
886       break;
887    }
888 
889    ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_CREATE_SHADER,
890 			     &sh_arg, sizeof(sh_arg));
891 
892    if (ret)
893       return SVGA3D_INVALID_ID;
894 
895    return sh_arg.shader_handle;
896 }
897 
898 void
vmw_ioctl_shader_destroy(struct vmw_winsys_screen * vws,uint32 shid)899 vmw_ioctl_shader_destroy(struct vmw_winsys_screen *vws, uint32 shid)
900 {
901    struct drm_vmw_shader_arg sh_arg;
902 
903    VMW_FUNC;
904 
905    memset(&sh_arg, 0, sizeof(sh_arg));
906    sh_arg.handle = shid;
907 
908    (void)drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_UNREF_SHADER,
909 			 &sh_arg, sizeof(sh_arg));
910 
911 }
912 
/**
 * vmw_ioctl_parse_caps - Populate vws->ioctl.cap_3d from a raw caps buffer.
 *
 * For guest-backed devices the buffer is a flat array of cap values
 * indexed by devcap id. For older devices it is an SVGA FIFO caps block
 * containing variable-length records that must be searched.
 *
 * Returns 0 on success, -1 if no devcaps record was found.
 */
static int
vmw_ioctl_parse_caps(struct vmw_winsys_screen *vws,
		     const uint32_t *cap_buffer)
{
   int i;

   if (vws->base.have_gb_objects) {
      /* Flat layout: one value per devcap index. */
      for (i = 0; i < vws->ioctl.num_cap_3d; ++i) {
	 vws->ioctl.cap_3d[i].has_cap = true;
	 vws->ioctl.cap_3d[i].result.u = cap_buffer[i];
      }
      return 0;
   } else {
      const uint32 *capsBlock;
      const SVGA3dCapsRecord *capsRecord = NULL;
      uint32 offset;
      const SVGA3dCapPair *capArray;
      int numCaps, index;

      /*
       * Search linearly through the caps block records for the specified type.
       */
      capsBlock = cap_buffer;
      /* Each record stores its own length in its first word; a zero length
       * terminates the block. Prefer the highest DEVCAPS record type found.
       */
      for (offset = 0; capsBlock[offset] != 0; offset += capsBlock[offset]) {
	 const SVGA3dCapsRecord *record;
	 assert(offset < SVGA_FIFO_3D_CAPS_SIZE);
	 record = (const SVGA3dCapsRecord *) (capsBlock + offset);
	 if ((record->header.type >= SVGA3DCAPS_RECORD_DEVCAPS_MIN) &&
	     (record->header.type <= SVGA3DCAPS_RECORD_DEVCAPS_MAX) &&
	     (!capsRecord || (record->header.type > capsRecord->header.type))) {
	    capsRecord = record;
	 }
      }

      if(!capsRecord)
	 return -1;

      /*
       * Calculate the number of caps from the size of the record.
       */
      capArray = (const SVGA3dCapPair *) capsRecord->data;
      numCaps = (int) ((capsRecord->header.length * sizeof(uint32) -
			sizeof capsRecord->header) / (2 * sizeof(uint32)));

      /* Each cap pair is (index, value); ignore indices we don't know. */
      for (i = 0; i < numCaps; i++) {
	 index = capArray[i][0];
	 if (index < vws->ioctl.num_cap_3d) {
	    vws->ioctl.cap_3d[index].has_cap = true;
	    vws->ioctl.cap_3d[index].result.u = capArray[i][1];
	 } else {
	    debug_printf("Unknown devcaps seen: %d\n", index);
	 }
      }
   }
   return 0;
}
969 
/**
 * Query the vmwgfx kernel module for its version and device capabilities
 * and initialize the ioctl-related state of the winsys screen.
 *
 * Establishes, in order: the kernel interface revision flags, whether 3D
 * is enabled, the FIFO hw version, guest-backed-object support, device
 * id, memory/size limits, optional features (VGPU10, SM4.1, SM5, GL4.3,
 * coherent memory), and finally the 3D capability table.
 *
 * Returns true on success; false if the kernel lacks 3D support or a
 * required query fails.
 */
bool
vmw_ioctl_init(struct vmw_winsys_screen *vws)
{
   struct drm_vmw_getparam_arg gp_arg;
   struct drm_vmw_get_3d_cap_arg cap_arg;
   unsigned int size;
   int ret;
   uint32_t *cap_buffer;
   drmVersionPtr version;
   bool drm_gb_capable;
   bool have_drm_2_5;
   const char *getenv_val;

   VMW_FUNC;

   version = drmGetVersion(vws->ioctl.drm_fd);
   if (!version)
      goto out_no_version;

   /* Record which vmwgfx interface revisions are available; each optional
    * feature below is gated on one of these flags.
    */
   have_drm_2_5 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 4);
   vws->ioctl.have_drm_2_6 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 5);
   vws->ioctl.have_drm_2_9 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 8);
   vws->ioctl.have_drm_2_15 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 14);
   vws->ioctl.have_drm_2_16 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 15);
   vws->ioctl.have_drm_2_17 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 16);
   vws->ioctl.have_drm_2_18 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 17);
   vws->ioctl.have_drm_2_19 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 18);
   vws->ioctl.have_drm_2_20 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 19);

   /* Execbuf v2 is only available from interface 2.9 on. */
   vws->ioctl.drm_execbuf_version = vws->ioctl.have_drm_2_9 ? 2 : 1;

   drm_gb_capable = have_drm_2_5;

   /* 3D must be enabled on the host for this winsys to be usable at all. */
   memset(&gp_arg, 0, sizeof(gp_arg));
   gp_arg.param = DRM_VMW_PARAM_3D;
   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
			     &gp_arg, sizeof(gp_arg));
   if (ret || gp_arg.value == 0) {
      vmw_error("No 3D enabled (%i, %s).\n", ret, strerror(-ret));
      goto out_no_3d;
   }

   memset(&gp_arg, 0, sizeof(gp_arg));
   gp_arg.param = DRM_VMW_PARAM_FIFO_HW_VERSION;
   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
			     &gp_arg, sizeof(gp_arg));
   if (ret) {
      vmw_error("Failed to get fifo hw version (%i, %s).\n",
                ret, strerror(-ret));
      goto out_no_3d;
   }
   vws->ioctl.hwversion = gp_arg.value;

   /* SVGA_FORCE_HOST_BACKED (non-"0") skips the HW_CAPS query, which
    * forces the !have_gb_objects (host-backed) path below.
    */
   getenv_val = getenv("SVGA_FORCE_HOST_BACKED");
   if (!getenv_val || strcmp(getenv_val, "0") == 0) {
      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_HW_CAPS;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                &gp_arg, sizeof(gp_arg));
   } else {
      ret = -EINVAL;
   }
   if (ret)
      vws->base.have_gb_objects = false;
   else
      vws->base.have_gb_objects =
         !!(gp_arg.value & (uint64_t) SVGA_CAP_GBOBJECTS);

   /* A GB-object device needs at least interface 2.5 to be driven. */
   if (vws->base.have_gb_objects && !drm_gb_capable)
      goto out_no_3d;

   vws->base.have_vgpu10 = false;
   vws->base.have_sm4_1 = false;
   vws->base.have_intra_surface_copy = false;

   memset(&gp_arg, 0, sizeof(gp_arg));
   gp_arg.param = DRM_VMW_PARAM_DEVICE_ID;
   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
              &gp_arg, sizeof(gp_arg));
   if (ret || gp_arg.value == 0) {
      vws->base.device_id = 0x0405; /* assume SVGA II */
   } else {
      vws->base.device_id = gp_arg.value;
   }

   if (vws->base.have_gb_objects) {
      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_MAX_MOB_MEMORY;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                &gp_arg, sizeof(gp_arg));
      if (ret) {
         /* Just guess a large enough value. */
         vws->ioctl.max_mob_memory = 256*1024*1024;
      } else {
         vws->ioctl.max_mob_memory = gp_arg.value;
      }

      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_MAX_MOB_SIZE;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                &gp_arg, sizeof(gp_arg));

      if (ret || gp_arg.value == 0) {
           vws->ioctl.max_texture_size = VMW_MAX_DEFAULT_TEXTURE_SIZE;
      } else {
           vws->ioctl.max_texture_size = gp_arg.value;
      }

      /* Never early flush surfaces, mobs do accounting. */
      vws->ioctl.max_surface_memory = -1;

      if (vws->ioctl.have_drm_2_9) {
         memset(&gp_arg, 0, sizeof(gp_arg));
         gp_arg.param = DRM_VMW_PARAM_DX;
         ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                   &gp_arg, sizeof(gp_arg));
         if (ret == 0 && gp_arg.value != 0) {
            const char *vgpu10_val;

            debug_printf("Have VGPU10 interface and hardware.\n");
            vws->base.have_vgpu10 = true;
            /* SVGA_VGPU10=0 lets users force the legacy interface. */
            vgpu10_val = getenv("SVGA_VGPU10");
            if (vgpu10_val && strcmp(vgpu10_val, "0") == 0) {
               debug_printf("Disabling VGPU10 interface.\n");
               vws->base.have_vgpu10 = false;
            } else {
               debug_printf("Enabling VGPU10 interface.\n");
            }
         }
      }

      /* Feature ladder: each step requires the previous feature plus a
       * newer kernel interface.
       */
      if (vws->ioctl.have_drm_2_15 && vws->base.have_vgpu10) {
         memset(&gp_arg, 0, sizeof(gp_arg));
         gp_arg.param = DRM_VMW_PARAM_HW_CAPS2;
         ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                   &gp_arg, sizeof(gp_arg));
         if (ret == 0 && gp_arg.value != 0) {
            vws->base.have_intra_surface_copy = true;
         }

         memset(&gp_arg, 0, sizeof(gp_arg));
         gp_arg.param = DRM_VMW_PARAM_SM4_1;
         ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                   &gp_arg, sizeof(gp_arg));
         if (ret == 0 && gp_arg.value != 0) {
            vws->base.have_sm4_1 = true;
         }
      }

      if (vws->ioctl.have_drm_2_18 && vws->base.have_sm4_1) {
         memset(&gp_arg, 0, sizeof(gp_arg));
         gp_arg.param = DRM_VMW_PARAM_SM5;
         ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                   &gp_arg, sizeof(gp_arg));
         if (ret == 0 && gp_arg.value != 0) {
            vws->base.have_sm5 = true;
         }
      }

      if (vws->ioctl.have_drm_2_20 && vws->base.have_sm5) {
         memset(&gp_arg, 0, sizeof(gp_arg));
         gp_arg.param = DRM_VMW_PARAM_GL43;
         ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                   &gp_arg, sizeof(gp_arg));
         if (ret == 0 && gp_arg.value != 0) {
            vws->base.have_gl43 = true;
         }
      }

      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_3D_CAPS_SIZE;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                &gp_arg, sizeof(gp_arg));
      if (ret)
         size = SVGA_FIFO_3D_CAPS_SIZE * sizeof(uint32_t);
      else
         size = gp_arg.value;

      if (vws->base.have_gb_objects)
         vws->ioctl.num_cap_3d = size / sizeof(uint32_t);
      else
         vws->ioctl.num_cap_3d = SVGA3D_DEVCAP_MAX;

      if (vws->ioctl.have_drm_2_16) {
         vws->base.have_coherent = true;
         /* SVGA_FORCE_COHERENT (non-"0") makes all buffers coherent. */
         getenv_val = getenv("SVGA_FORCE_COHERENT");
         if (getenv_val && strcmp(getenv_val, "0") != 0)
            vws->force_coherent = true;
      }
   } else {
      /* Non-GB (host-backed) path: fixed devcap count and a surface
       * memory limit instead of mob accounting.
       */
      vws->ioctl.num_cap_3d = SVGA3D_DEVCAP_MAX;

      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_MAX_SURF_MEMORY;
      if (have_drm_2_5)
         ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                   &gp_arg, sizeof(gp_arg));
      if (!have_drm_2_5 || ret) {
         /* Just guess a large enough value, around 800mb. */
         vws->ioctl.max_surface_memory = 0x30000000;
      } else {
         vws->ioctl.max_surface_memory = gp_arg.value;
      }

      vws->ioctl.max_texture_size = VMW_MAX_DEFAULT_TEXTURE_SIZE;

      size = SVGA_FIFO_3D_CAPS_SIZE * sizeof(uint32_t);
   }

   debug_printf("VGPU10 interface is %s.\n",
                vws->base.have_vgpu10 ? "on" : "off");

   cap_buffer = calloc(1, size);
   if (!cap_buffer) {
      debug_printf("Failed alloc fifo 3D caps buffer.\n");
      goto out_no_3d;
   }

   vws->ioctl.cap_3d = calloc(vws->ioctl.num_cap_3d,
			      sizeof(*vws->ioctl.cap_3d));
   if (!vws->ioctl.cap_3d) {
      debug_printf("Failed alloc fifo 3D caps buffer.\n");
      goto out_no_caparray;
   }

   memset(&cap_arg, 0, sizeof(cap_arg));
   cap_arg.buffer = (uint64_t) (unsigned long) (cap_buffer);
   cap_arg.max_size = size;

   /*
    * This call must always be after DRM_VMW_PARAM_MAX_MOB_MEMORY and
    * DRM_VMW_PARAM_SM4_1. This is because, based on these calls, kernel
    * driver sends the supported cap.
    */
   ret = drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_GET_3D_CAP,
			 &cap_arg, sizeof(cap_arg));

   if (ret) {
      debug_printf("Failed to get 3D capabilities"
		   " (%i, %s).\n", ret, strerror(-ret));
      goto out_no_caps;
   }

   ret = vmw_ioctl_parse_caps(vws, cap_buffer);
   if (ret) {
      debug_printf("Failed to parse 3D capabilities"
		   " (%i, %s).\n", ret, strerror(-ret));
      goto out_no_caps;
   }

   if (((version->version_major == 2 && version->version_minor >= 10)
       || version->version_major > 2) && vws->base.have_vgpu10) {

     /* support for these commands didn't make it into vmwgfx kernel
      * modules before 2.10.
      */
      vws->base.have_generate_mipmap_cmd = true;
      vws->base.have_set_predication_cmd = true;
   }

   /* NOTE(review): only checks major == 2; a hypothetical major > 2 kernel
    * would not get have_fence_fd here — confirm intended.
    */
   if (version->version_major == 2 && version->version_minor >= 14) {
      vws->base.have_fence_fd = true;
   }

   free(cap_buffer);
   drmFreeVersion(version);
   vmw_printf("%s OK\n", __func__);
   return true;
  out_no_caps:
   free(vws->ioctl.cap_3d);
  out_no_caparray:
   free(cap_buffer);
  out_no_3d:
   drmFreeVersion(version);
  out_no_version:
   vws->ioctl.num_cap_3d = 0;
   debug_printf("%s Failed\n", __func__);
   return false;
}
1257 
1258 
1259 
1260 void
vmw_ioctl_cleanup(struct vmw_winsys_screen * vws)1261 vmw_ioctl_cleanup(struct vmw_winsys_screen *vws)
1262 {
1263    VMW_FUNC;
1264 
1265    free(vws->ioctl.cap_3d);
1266 }
1267