/*
 * Copyright (c) 2009-2024 Broadcom. All Rights Reserved.
 * The term "Broadcom" refers to Broadcom Inc.
 * and/or its subsidiaries.
 * SPDX-License-Identifier: MIT
 */

/**
 * @file
 *
 * Wrappers for DRM ioctl functionality used by the rest of the vmw
 * drm winsys.
 *
 * Based on svgaicd_escape.c
 */


#include "svga_cmd.h"
#include "util/u_memory.h"
#include "util/u_math.h"
#include "svgadump/svga_dump.h"
#include "frontend/drm_driver.h"
#include "vmw_screen.h"
#include "vmw_context.h"
#include "vmw_fence.h"
#include "xf86drm.h"
#include "vmwgfx_drm.h"
#include "svga3d_devcaps.h"
#include "svga3d_reg.h"

#include "util/os_mman.h"

#include <errno.h>
#include <unistd.h>

#define VMW_MAX_DEFAULT_TEXTURE_SIZE (128 * 1024 * 1024)
#define VMW_FENCE_TIMEOUT_SECONDS 3600UL

#define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)
#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) (svga3d_flags >> 32)
#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
   (svga3d_flags & ((uint64_t)UINT32_MAX))
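
/*
 * Illustrative sketch (not upstream code): the three macros above split a
 * 64-bit SVGA3dSurfaceAllFlags value across the two 32-bit fields of the
 * extended guest-backed surface ioctl and recombine it on the way back:
 *
 *   SVGA3dSurfaceAllFlags flags = ...;
 *   uint32_t lo = SVGA3D_FLAGS_LOWER_32(flags);
 *   uint32_t hi = SVGA3D_FLAGS_UPPER_32(flags);
 *   assert(SVGA3D_FLAGS_64(hi, lo) == flags);
 */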

struct vmw_region
{
   uint32_t handle;
   uint64_t map_handle;
   void *data;
   uint32_t map_count;
   int drm_fd;
   uint32_t size;
};

uint32_t
vmw_region_size(struct vmw_region *region)
{
   return region->size;
}

#if defined(__DragonFly__) || defined(__FreeBSD__) || \
    defined(__NetBSD__) || defined(__OpenBSD__)
#define ERESTART EINTR
#endif

uint32
vmw_ioctl_context_create(struct vmw_winsys_screen *vws)
{
   struct drm_vmw_context_arg c_arg;
   int ret;

   VMW_FUNC;

   ret = drmCommandRead(vws->ioctl.drm_fd, DRM_VMW_CREATE_CONTEXT,
                        &c_arg, sizeof(c_arg));

   if (ret)
      return -1;

   vmw_printf("Context id is %d\n", c_arg.cid);
   return c_arg.cid;
}

uint32
vmw_ioctl_extended_context_create(struct vmw_winsys_screen *vws,
                                  bool vgpu10)
{
   union drm_vmw_extended_context_arg c_arg;
   int ret;

   VMW_FUNC;
   memset(&c_arg, 0, sizeof(c_arg));
   c_arg.req = (vgpu10 ? drm_vmw_context_dx : drm_vmw_context_legacy);
   ret = drmCommandWriteRead(vws->ioctl.drm_fd,
                             DRM_VMW_CREATE_EXTENDED_CONTEXT,
                             &c_arg, sizeof(c_arg));

   if (ret)
      return -1;

   vmw_printf("Context id is %d\n", c_arg.cid);
   return c_arg.rep.cid;
}

void
vmw_ioctl_context_destroy(struct vmw_winsys_screen *vws, uint32 cid)
{
   struct drm_vmw_context_arg c_arg;

   VMW_FUNC;

   memset(&c_arg, 0, sizeof(c_arg));
   c_arg.cid = cid;

   (void)drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_UNREF_CONTEXT,
                         &c_arg, sizeof(c_arg));

}

uint32
vmw_ioctl_surface_create(struct vmw_winsys_screen *vws,
                         SVGA3dSurface1Flags flags,
                         SVGA3dSurfaceFormat format,
                         unsigned usage,
                         SVGA3dSize size,
                         uint32_t numFaces, uint32_t numMipLevels,
                         unsigned sampleCount)
{
   union drm_vmw_surface_create_arg s_arg;
   struct drm_vmw_surface_create_req *req = &s_arg.req;
   struct drm_vmw_surface_arg *rep = &s_arg.rep;
   struct drm_vmw_size sizes[DRM_VMW_MAX_SURFACE_FACES*
                             DRM_VMW_MAX_MIP_LEVELS];
   struct drm_vmw_size *cur_size;
   uint32_t iFace;
   uint32_t iMipLevel;
   int ret;

   vmw_printf("%s flags %d format %d\n", __func__, flags, format);

   memset(&s_arg, 0, sizeof(s_arg));
   req->flags = (uint32_t) flags;
   req->scanout = !!(usage & SVGA_SURFACE_USAGE_SCANOUT);
   req->format = (uint32_t) format;
   req->shareable = true;

   assert(numFaces * numMipLevels < DRM_VMW_MAX_SURFACE_FACES*
          DRM_VMW_MAX_MIP_LEVELS);
   cur_size = sizes;
   for (iFace = 0; iFace < numFaces; ++iFace) {
      SVGA3dSize mipSize = size;

      req->mip_levels[iFace] = numMipLevels;
      for (iMipLevel = 0; iMipLevel < numMipLevels; ++iMipLevel) {
         cur_size->width = mipSize.width;
         cur_size->height = mipSize.height;
         cur_size->depth = mipSize.depth;
         mipSize.width = MAX2(mipSize.width >> 1, 1);
         mipSize.height = MAX2(mipSize.height >> 1, 1);
         mipSize.depth = MAX2(mipSize.depth >> 1, 1);
         cur_size++;
      }
   }
   for (iFace = numFaces; iFace < SVGA3D_MAX_SURFACE_FACES; ++iFace) {
      req->mip_levels[iFace] = 0;
   }

   req->size_addr = (unsigned long)&sizes;

   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_CREATE_SURFACE,
                             &s_arg, sizeof(s_arg));

   if (ret)
      return -1;

   vmw_printf("Surface id is %d\n", rep->sid);

   return rep->sid;
}


uint32
vmw_ioctl_gb_surface_create(struct vmw_winsys_screen *vws,
                            SVGA3dSurfaceAllFlags flags,
                            SVGA3dSurfaceFormat format,
                            unsigned usage,
                            SVGA3dSize size,
                            uint32_t numFaces,
                            uint32_t numMipLevels,
                            unsigned sampleCount,
                            uint32_t buffer_handle,
                            SVGA3dMSPattern multisamplePattern,
                            SVGA3dMSQualityLevel qualityLevel,
                            struct vmw_region **p_region)
{
   union {
      union drm_vmw_gb_surface_create_ext_arg ext_arg;
      union drm_vmw_gb_surface_create_arg arg;
   } s_arg;
   struct drm_vmw_gb_surface_create_rep *rep;
   struct vmw_region *region = NULL;
   int ret;

   vmw_printf("%s flags %d format %d\n", __func__, flags, format);

   if (p_region) {
      region = CALLOC_STRUCT(vmw_region);
      if (!region)
         return SVGA3D_INVALID_ID;
   }

   memset(&s_arg, 0, sizeof(s_arg));

   if (vws->ioctl.have_drm_2_15) {
      struct drm_vmw_gb_surface_create_ext_req *req = &s_arg.ext_arg.req;
      rep = &s_arg.ext_arg.rep;

      req->version = drm_vmw_gb_surface_v1;
      req->multisample_pattern = multisamplePattern;
      req->quality_level = qualityLevel;
      req->buffer_byte_stride = 0;
      req->must_be_zero = 0;
      req->base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(flags);
      req->svga3d_flags_upper_32_bits = SVGA3D_FLAGS_UPPER_32(flags);
      req->base.format = (uint32_t) format;

      if (usage & SVGA_SURFACE_USAGE_SCANOUT)
         req->base.drm_surface_flags |= drm_vmw_surface_flag_scanout;

      if ((usage & SVGA_SURFACE_USAGE_COHERENT) || vws->force_coherent)
         req->base.drm_surface_flags |= drm_vmw_surface_flag_coherent;

      req->base.drm_surface_flags |= drm_vmw_surface_flag_shareable;
      req->base.drm_surface_flags |= drm_vmw_surface_flag_create_buffer;
      req->base.base_size.width = size.width;
      req->base.base_size.height = size.height;
      req->base.base_size.depth = size.depth;
      req->base.mip_levels = numMipLevels;
      req->base.multisample_count = 0;
      req->base.autogen_filter = SVGA3D_TEX_FILTER_NONE;

      if (vws->base.have_vgpu10) {
         req->base.array_size = numFaces;
         req->base.multisample_count = sampleCount;
      } else {
         assert(numFaces * numMipLevels < DRM_VMW_MAX_SURFACE_FACES*
                DRM_VMW_MAX_MIP_LEVELS);
         req->base.array_size = 0;
      }

      req->base.buffer_handle = buffer_handle ?
         buffer_handle : SVGA3D_INVALID_ID;

      ret = drmCommandWriteRead(vws->ioctl.drm_fd,
                                DRM_VMW_GB_SURFACE_CREATE_EXT, &s_arg.ext_arg,
                                sizeof(s_arg.ext_arg));

      if (ret)
         goto out_fail_create;
   } else {
      struct drm_vmw_gb_surface_create_req *req = &s_arg.arg.req;
      rep = &s_arg.arg.rep;

      req->svga3d_flags = (uint32_t) flags;
      req->format = (uint32_t) format;

      if (usage & SVGA_SURFACE_USAGE_SCANOUT)
         req->drm_surface_flags |= drm_vmw_surface_flag_scanout;

      req->drm_surface_flags |= drm_vmw_surface_flag_shareable;

      req->drm_surface_flags |= drm_vmw_surface_flag_create_buffer;
      req->base_size.width = size.width;
      req->base_size.height = size.height;
      req->base_size.depth = size.depth;
      req->mip_levels = numMipLevels;
      req->multisample_count = 0;
      req->autogen_filter = SVGA3D_TEX_FILTER_NONE;

      if (vws->base.have_vgpu10) {
         req->array_size = numFaces;
         req->multisample_count = sampleCount;
      } else {
         assert(numFaces * numMipLevels < DRM_VMW_MAX_SURFACE_FACES*
                DRM_VMW_MAX_MIP_LEVELS);
         req->array_size = 0;
      }

      req->buffer_handle = buffer_handle ?
         buffer_handle : SVGA3D_INVALID_ID;

      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GB_SURFACE_CREATE,
                                &s_arg.arg, sizeof(s_arg.arg));

      if (ret)
         goto out_fail_create;
   }

   if (p_region) {
      region->handle = rep->buffer_handle;
      region->map_handle = rep->buffer_map_handle;
      region->drm_fd = vws->ioctl.drm_fd;
      region->size = rep->backup_size;
      *p_region = region;
   }

   vmw_printf("Surface id is %d\n", rep->sid);
   return rep->handle;

out_fail_create:
   FREE(region);
   return SVGA3D_INVALID_ID;
}

/**
 * vmw_ioctl_surface_req - Fill in a struct drm_vmw_surface_arg
 *
 * @vws: Winsys screen
 * @whandle: Surface handle
 * @req: The struct drm_vmw_surface_arg to fill in
 * @needs_unref: Set to true if this call takes a kernel surface reference
 * that needs to be unreferenced.
 *
 * Returns 0 on success, a negative error code otherwise.
 * Fills in the surface_req structure according to handle type and kernel
 * capabilities.
 */
static int
vmw_ioctl_surface_req(const struct vmw_winsys_screen *vws,
                      const struct winsys_handle *whandle,
                      struct drm_vmw_surface_arg *req,
                      bool *needs_unref)
{
   int ret;

   switch (whandle->type) {
   case WINSYS_HANDLE_TYPE_SHARED:
   case WINSYS_HANDLE_TYPE_KMS:
      *needs_unref = false;
      req->handle_type = DRM_VMW_HANDLE_LEGACY;
      req->sid = whandle->handle;
      break;
   case WINSYS_HANDLE_TYPE_FD:
      if (!vws->ioctl.have_drm_2_6) {
         uint32_t handle;

         ret = drmPrimeFDToHandle(vws->ioctl.drm_fd, whandle->handle, &handle);
         if (ret) {
            vmw_error("Failed to get handle from prime fd %d.\n",
                      (int) whandle->handle);
            return -EINVAL;
         }

         *needs_unref = true;
         req->handle_type = DRM_VMW_HANDLE_LEGACY;
         req->sid = handle;
      } else {
         *needs_unref = false;
         req->handle_type = DRM_VMW_HANDLE_PRIME;
         req->sid = whandle->handle;
      }
      break;
   default:
      vmw_error("Attempt to import unsupported handle type %d.\n",
                whandle->type);
      return -EINVAL;
   }

   return 0;
}
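
/*
 * Illustrative sketch (not upstream code): a typical import path resolves a
 * winsys handle to a kernel surface id before referencing the surface, as
 * done in vmw_ioctl_gb_surface_ref() below:
 *
 *   struct drm_vmw_surface_arg req = {0};
 *   bool needs_unref;
 *
 *   if (vmw_ioctl_surface_req(vws, whandle, &req, &needs_unref) == 0) {
 *      ... use req.sid; if needs_unref was set, drop the extra kernel
 *      reference with vmw_ioctl_surface_destroy(vws, req.sid) when done ...
 *   }
 */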

/**
 * vmw_ioctl_gb_surface_ref - Put a reference on a guest-backed surface and
 * get surface information
 *
 * @vws: Screen to register the reference on
 * @whandle: Winsys handle of the guest-backed surface
 * @flags: Flags used when the surface was created
 * @format: Format used when the surface was created
 * @numMipLevels: Number of mipmap levels of the surface
 * @handle: On successful return, the kernel handle of the surface
 * @p_region: On successful return points to a newly allocated
 * struct vmw_region holding a reference to the surface backup buffer.
 *
 * Returns 0 on success, a system error on failure.
 */
int
vmw_ioctl_gb_surface_ref(struct vmw_winsys_screen *vws,
                         const struct winsys_handle *whandle,
                         SVGA3dSurfaceAllFlags *flags,
                         SVGA3dSurfaceFormat *format,
                         uint32_t *numMipLevels,
                         uint32_t *handle,
                         struct vmw_region **p_region)
{
   struct vmw_region *region = NULL;
   bool needs_unref = false;
   int ret;

   assert(p_region != NULL);
   region = CALLOC_STRUCT(vmw_region);
   if (!region)
      return -ENOMEM;

   if (vws->ioctl.have_drm_2_15) {
      union drm_vmw_gb_surface_reference_ext_arg s_arg;
      struct drm_vmw_surface_arg *req = &s_arg.req;
      struct drm_vmw_gb_surface_ref_ext_rep *rep = &s_arg.rep;

      memset(&s_arg, 0, sizeof(s_arg));
      ret = vmw_ioctl_surface_req(vws, whandle, req, &needs_unref);
      if (ret)
         goto out_fail_req;

      *handle = req->sid;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GB_SURFACE_REF_EXT,
                                &s_arg, sizeof(s_arg));

      if (ret)
         goto out_fail_ref;

      region->handle = rep->crep.buffer_handle;
      region->map_handle = rep->crep.buffer_map_handle;
      region->drm_fd = vws->ioctl.drm_fd;
      region->size = rep->crep.backup_size;
      *p_region = region;

      *handle = rep->crep.handle;
      *flags = SVGA3D_FLAGS_64(rep->creq.svga3d_flags_upper_32_bits,
                               rep->creq.base.svga3d_flags);
      *format = rep->creq.base.format;
      *numMipLevels = rep->creq.base.mip_levels;
   } else {
      union drm_vmw_gb_surface_reference_arg s_arg;
      struct drm_vmw_surface_arg *req = &s_arg.req;
      struct drm_vmw_gb_surface_ref_rep *rep = &s_arg.rep;

      memset(&s_arg, 0, sizeof(s_arg));
      ret = vmw_ioctl_surface_req(vws, whandle, req, &needs_unref);
      if (ret)
         goto out_fail_req;

      *handle = req->sid;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GB_SURFACE_REF,
                                &s_arg, sizeof(s_arg));

      if (ret)
         goto out_fail_ref;

      region->handle = rep->crep.buffer_handle;
      region->map_handle = rep->crep.buffer_map_handle;
      region->drm_fd = vws->ioctl.drm_fd;
      region->size = rep->crep.backup_size;
      *p_region = region;

      *handle = rep->crep.handle;
      *flags = rep->creq.svga3d_flags;
      *format = rep->creq.format;
      *numMipLevels = rep->creq.mip_levels;
   }

   vmw_printf("%s flags %d format %d\n", __func__, *flags, *format);

   if (needs_unref)
      vmw_ioctl_surface_destroy(vws, *handle);

   return 0;
out_fail_ref:
   if (needs_unref)
      vmw_ioctl_surface_destroy(vws, *handle);
out_fail_req:
   FREE(region);
   return ret;
}
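
/*
 * Illustrative sketch (not upstream code): importing a surface that arrived
 * as a prime file descriptor. The handle type selects the lookup path in
 * vmw_ioctl_surface_req(); field names of struct winsys_handle are assumed
 * from its use above.
 *
 *   struct winsys_handle whandle = {
 *      .type = WINSYS_HANDLE_TYPE_FD,
 *      .handle = prime_fd,
 *   };
 *   SVGA3dSurfaceAllFlags flags;
 *   SVGA3dSurfaceFormat format;
 *   uint32_t num_mips, handle;
 *   struct vmw_region *region;
 *
 *   if (vmw_ioctl_gb_surface_ref(vws, &whandle, &flags, &format,
 *                                &num_mips, &handle, &region) == 0) {
 *      ... use the surface; release with vmw_ioctl_surface_destroy() and
 *      vmw_ioctl_region_destroy() when done ...
 *   }
 */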

void
vmw_ioctl_surface_destroy(struct vmw_winsys_screen *vws, uint32 sid)
{
   struct drm_vmw_surface_arg s_arg;

   VMW_FUNC;

   memset(&s_arg, 0, sizeof(s_arg));
   s_arg.sid = sid;

   (void)drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_UNREF_SURFACE,
                         &s_arg, sizeof(s_arg));
}

void
vmw_ioctl_command(struct vmw_winsys_screen *vws, int32_t cid,
                  uint32_t throttle_us, void *commands, uint32_t size,
                  struct pipe_fence_handle **pfence, int32_t imported_fence_fd,
                  uint32_t flags)
{
   struct drm_vmw_execbuf_arg arg;
   struct drm_vmw_fence_rep rep;
   int ret;
   int argsize;

#if MESA_DEBUG
   {
      static bool firsttime = true;
      static bool debug = false;
      static bool skip = false;
      if (firsttime) {
         debug = debug_get_bool_option("SVGA_DUMP_CMD", false);
         skip = debug_get_bool_option("SVGA_SKIP_CMD", false);
      }
      if (debug) {
         VMW_FUNC;
         svga_dump_commands(commands, size);
      }
      firsttime = false;
      if (skip) {
         size = 0;
      }
   }
#endif

   memset(&arg, 0, sizeof(arg));
   memset(&rep, 0, sizeof(rep));

   if (flags & SVGA_HINT_FLAG_EXPORT_FENCE_FD) {
      arg.flags |= DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD;
   }

   if (imported_fence_fd != -1) {
      arg.flags |= DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD;
   }

   rep.error = -EFAULT;
   if (pfence)
      arg.fence_rep = (unsigned long)&rep;
   arg.commands = (unsigned long)commands;
   arg.command_size = size;
   arg.throttle_us = throttle_us;
   arg.version = vws->ioctl.drm_execbuf_version;
   arg.context_handle = (vws->base.have_vgpu10 ? cid : SVGA3D_INVALID_ID);

   /* Older DRM module requires this to be zero */
   if (vws->base.have_fence_fd)
      arg.imported_fence_fd = imported_fence_fd;

   /* In DRM_VMW_EXECBUF_VERSION 1, the drm_vmw_execbuf_arg structure ends with
    * the flags field. The structure size sent to drmCommandWrite must match
    * the drm_execbuf_version. Otherwise, an invalid value will be returned.
    */
   argsize = vws->ioctl.drm_execbuf_version > 1 ? sizeof(arg) :
      offsetof(struct drm_vmw_execbuf_arg, context_handle);
   do {
      ret = drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_EXECBUF, &arg, argsize);
      if (ret == -EBUSY)
         usleep(1000);
   } while (ret == -ERESTART || ret == -EBUSY);
   if (ret) {
      vmw_error("%s error %s.\n", __func__, strerror(-ret));
      abort();
   }

   if (rep.error) {

      /*
       * Kernel has already synced, or caller requested no fence.
       */
      if (pfence)
         *pfence = NULL;
   } else {
      if (pfence) {
         vmw_fences_signal(vws->fence_ops, rep.passed_seqno, rep.seqno,
                           true);

         /* Older DRM module will set this to zero, but -1 is the proper FD
          * to use for no Fence FD support */
         if (!vws->base.have_fence_fd)
            rep.fd = -1;

         *pfence = vmw_fence_create(vws->fence_ops, rep.handle,
                                    rep.seqno, rep.mask, rep.fd);
         if (*pfence == NULL) {
            /*
             * Fence creation failed. Need to sync.
             */
            (void) vmw_ioctl_fence_finish(vws, rep.handle, rep.mask);
            vmw_ioctl_fence_unref(vws, rep.handle);
         }
      }
   }
}
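
/*
 * Illustrative sketch (not upstream code): a fire-and-forget submission.
 * Passing NULL for pfence leaves arg.fence_rep at zero so the kernel skips
 * the fence reply, and -1 means no imported fence fd:
 *
 *   vmw_ioctl_command(vws, cid, 0, commands, size, NULL, -1, 0);
 */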


struct vmw_region *
vmw_ioctl_region_create(struct vmw_winsys_screen *vws, uint32_t size)
{
   struct vmw_region *region;
   union drm_vmw_alloc_dmabuf_arg arg;
   struct drm_vmw_alloc_dmabuf_req *req = &arg.req;
   struct drm_vmw_dmabuf_rep *rep = &arg.rep;
   int ret;

   vmw_printf("%s: size = %u\n", __func__, size);

   region = CALLOC_STRUCT(vmw_region);
   if (!region)
      goto out_err1;

   memset(&arg, 0, sizeof(arg));
   req->size = size;
   do {
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_ALLOC_DMABUF, &arg,
                                sizeof(arg));
   } while (ret == -ERESTART);

   if (ret) {
      vmw_error("IOCTL failed %d: %s\n", ret, strerror(-ret));
      goto out_err1;
   }

   region->data = NULL;
   region->handle = rep->handle;
   region->map_handle = rep->map_handle;
   region->map_count = 0;
   region->size = size;
   region->drm_fd = vws->ioctl.drm_fd;

   vmw_printf(" gmrId = %u, offset = %u\n",
              region->ptr.gmrId, region->ptr.offset);

   return region;

out_err1:
   FREE(region);
   return NULL;
}

void
vmw_ioctl_region_destroy(struct vmw_region *region)
{
   struct drm_vmw_unref_dmabuf_arg arg;

   vmw_printf("%s: gmrId = %u, offset = %u\n", __func__,
              region->ptr.gmrId, region->ptr.offset);

   if (region->data) {
      os_munmap(region->data, region->size);
      region->data = NULL;
   }

   memset(&arg, 0, sizeof(arg));
   arg.handle = region->handle;
   drmCommandWrite(region->drm_fd, DRM_VMW_UNREF_DMABUF, &arg, sizeof(arg));

   FREE(region);
}

SVGAGuestPtr
vmw_ioctl_region_ptr(struct vmw_region *region)
{
   SVGAGuestPtr ptr = {region->handle, 0};
   return ptr;
}

void *
vmw_ioctl_region_map(struct vmw_region *region)
{
   void *map;

   vmw_printf("%s: gmrId = %u, offset = %u\n", __func__,
              region->ptr.gmrId, region->ptr.offset);

   if (region->data == NULL) {
      map = os_mmap(NULL, region->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    region->drm_fd, region->map_handle);
      if (map == MAP_FAILED) {
         vmw_error("%s: Map failed.\n", __func__);
         return NULL;
      }

      // MADV_HUGEPAGE only exists on Linux
#ifdef MADV_HUGEPAGE
      (void) madvise(map, region->size, MADV_HUGEPAGE);
#endif
      region->data = map;
   }

   ++region->map_count;

   return region->data;
}

void
vmw_ioctl_region_unmap(struct vmw_region *region)
{
   vmw_printf("%s: gmrId = %u, offset = %u\n", __func__,
              region->ptr.gmrId, region->ptr.offset);

   --region->map_count;
   os_munmap(region->data, region->size);
   region->data = NULL;
}
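
/*
 * Illustrative sketch (not upstream code): typical lifecycle of a region
 * created through the wrappers above.
 *
 *   struct vmw_region *region = vmw_ioctl_region_create(vws, 4096);
 *   if (region) {
 *      void *data = vmw_ioctl_region_map(region);
 *      if (data) {
 *         ... fill data ...
 *         vmw_ioctl_region_unmap(region);
 *      }
 *      vmw_ioctl_region_destroy(region);
 *   }
 */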

/**
 * vmw_ioctl_syncforcpu - Synchronize a buffer object for CPU usage
 *
 * @region: Pointer to a struct vmw_region representing the buffer object.
 * @dont_block: Don't wait for GPU idle, but rather return -EBUSY if the
 * GPU is busy with the buffer object.
 * @readonly: Hint that the CPU access is read-only.
 * @allow_cs: Allow concurrent command submission while the buffer is
 * synchronized for CPU. If false, command submissions referencing the
 * buffer will block until a corresponding call to vmw_ioctl_releasefromcpu.
 *
 * This function idles any GPU activities touching the buffer and blocks
 * command submission of commands referencing the buffer, even from
 * other processes.
 */
int
vmw_ioctl_syncforcpu(struct vmw_region *region,
                     bool dont_block,
                     bool readonly,
                     bool allow_cs)
{
   struct drm_vmw_synccpu_arg arg;
   int ret;

   memset(&arg, 0, sizeof(arg));
   arg.op = drm_vmw_synccpu_grab;
   arg.handle = region->handle;
   arg.flags = drm_vmw_synccpu_read;
   if (!readonly)
      arg.flags |= drm_vmw_synccpu_write;
   if (dont_block)
      arg.flags |= drm_vmw_synccpu_dontblock;
   if (allow_cs)
      arg.flags |= drm_vmw_synccpu_allow_cs;

   do {
      ret = drmCommandWrite(region->drm_fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
      if (ret == -EBUSY)
         usleep(1000);
   } while (ret == -ERESTART || ret == -EBUSY);

   if (ret)
      vmw_error("%s Failed synccpu with error %s.\n", __func__, strerror(-ret));

   return ret;
}
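
/*
 * Illustrative sketch (not upstream code): grab/release pairing for a
 * read-only CPU access window. The readonly and allow_cs arguments must
 * match between the two calls.
 *
 *   if (vmw_ioctl_syncforcpu(region, false, true, false) == 0) {
 *      void *data = vmw_ioctl_region_map(region);
 *      ... read from data ...
 *      vmw_ioctl_region_unmap(region);
 *      vmw_ioctl_releasefromcpu(region, true, false);
 *   }
 */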

/**
 * vmw_ioctl_releasefromcpu - Undo a previous syncforcpu.
 *
 * @region: Pointer to a struct vmw_region representing the buffer object.
 * @readonly: Should hold the same value as the matching syncforcpu call.
 * @allow_cs: Should hold the same value as the matching syncforcpu call.
 */
void
vmw_ioctl_releasefromcpu(struct vmw_region *region,
                         bool readonly,
                         bool allow_cs)
{
   struct drm_vmw_synccpu_arg arg;

   memset(&arg, 0, sizeof(arg));
   arg.op = drm_vmw_synccpu_release;
   arg.handle = region->handle;
   arg.flags = drm_vmw_synccpu_read;
   if (!readonly)
      arg.flags |= drm_vmw_synccpu_write;
   if (allow_cs)
      arg.flags |= drm_vmw_synccpu_allow_cs;

   (void) drmCommandWrite(region->drm_fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
}

void
vmw_ioctl_fence_unref(struct vmw_winsys_screen *vws,
                      uint32_t handle)
{
   struct drm_vmw_fence_arg arg;
   int ret;

   memset(&arg, 0, sizeof(arg));
   arg.handle = handle;

   ret = drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_FENCE_UNREF,
                         &arg, sizeof(arg));
   if (ret != 0)
      vmw_error("%s Failed\n", __func__);
}

static inline uint32_t
vmw_drm_fence_flags(uint32_t flags)
{
   uint32_t dflags = 0;

   if (flags & SVGA_FENCE_FLAG_EXEC)
      dflags |= DRM_VMW_FENCE_FLAG_EXEC;
   if (flags & SVGA_FENCE_FLAG_QUERY)
      dflags |= DRM_VMW_FENCE_FLAG_QUERY;

   return dflags;
}


int
vmw_ioctl_fence_signalled(struct vmw_winsys_screen *vws,
                          uint32_t handle,
                          uint32_t flags)
{
   struct drm_vmw_fence_signaled_arg arg;
   uint32_t vflags = vmw_drm_fence_flags(flags);
   int ret;

   memset(&arg, 0, sizeof(arg));
   arg.handle = handle;
   arg.flags = vflags;

   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_FENCE_SIGNALED,
                             &arg, sizeof(arg));

   if (ret != 0)
      return ret;

   vmw_fences_signal(vws->fence_ops, arg.passed_seqno, 0, false);

   return (arg.signaled) ? 0 : -1;
}



int
vmw_ioctl_fence_finish(struct vmw_winsys_screen *vws,
                       uint32_t handle,
                       uint32_t flags)
{
   struct drm_vmw_fence_wait_arg arg;
   uint32_t vflags = vmw_drm_fence_flags(flags);
   int ret;

   memset(&arg, 0, sizeof(arg));

   arg.handle = handle;
   arg.timeout_us = VMW_FENCE_TIMEOUT_SECONDS*1000000;
   arg.lazy = 0;
   arg.flags = vflags;

   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_FENCE_WAIT,
                             &arg, sizeof(arg));

   if (ret != 0)
      vmw_error("%s Failed\n", __func__);

   return 0;
}

uint32
vmw_ioctl_shader_create(struct vmw_winsys_screen *vws,
                        SVGA3dShaderType type,
                        uint32 code_len)
{
   struct drm_vmw_shader_create_arg sh_arg;
   int ret;

   VMW_FUNC;

   memset(&sh_arg, 0, sizeof(sh_arg));

   sh_arg.size = code_len;
   sh_arg.buffer_handle = SVGA3D_INVALID_ID;
   sh_arg.shader_handle = SVGA3D_INVALID_ID;
   switch (type) {
   case SVGA3D_SHADERTYPE_VS:
      sh_arg.shader_type = drm_vmw_shader_type_vs;
      break;
   case SVGA3D_SHADERTYPE_PS:
      sh_arg.shader_type = drm_vmw_shader_type_ps;
      break;
   default:
      assert(!"Invalid shader type.");
      break;
   }

   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_CREATE_SHADER,
                             &sh_arg, sizeof(sh_arg));

   if (ret)
      return SVGA3D_INVALID_ID;

   return sh_arg.shader_handle;
}

void
vmw_ioctl_shader_destroy(struct vmw_winsys_screen *vws, uint32 shid)
{
   struct drm_vmw_shader_arg sh_arg;

   VMW_FUNC;

   memset(&sh_arg, 0, sizeof(sh_arg));
   sh_arg.handle = shid;

   (void)drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_UNREF_SHADER,
                         &sh_arg, sizeof(sh_arg));

}

struct svga_3d_compat_cap {
   SVGA3dFifoCapsRecordHeader header;
   SVGA3dFifoCapPair pairs[SVGA3D_DEVCAP_MAX];
};

static int
vmw_ioctl_parse_caps(struct vmw_winsys_screen *vws,
                     const uint32_t *cap_buffer)
{
   int i;

   if (vws->base.have_gb_objects) {
      for (i = 0; i < vws->ioctl.num_cap_3d; ++i) {
         vws->ioctl.cap_3d[i].has_cap = true;
         vws->ioctl.cap_3d[i].result.u = cap_buffer[i];
      }
      return 0;
   } else {
      const uint32 *capsBlock;
      const struct svga_3d_compat_cap *capsRecord = NULL;
      uint32 offset;
      const SVGA3dFifoCapPair *capArray;
      int numCaps, index;

      /*
       * Search linearly through the caps block records for the specified type.
       */
      capsBlock = cap_buffer;
      for (offset = 0; capsBlock[offset] != 0; offset += capsBlock[offset]) {
         const struct svga_3d_compat_cap *record;
         assert(offset < SVGA_FIFO_3D_CAPS_SIZE);
         record = (const struct svga_3d_compat_cap *) (capsBlock + offset);
         if ((record->header.type >= 0) &&
             (record->header.type <= SVGA3D_DEVCAP_MAX) &&
             (!capsRecord || (record->header.type > capsRecord->header.type))) {
            capsRecord = record;
         }
      }

      if (!capsRecord)
         return -1;

      /*
       * Calculate the number of caps from the size of the record.
       */
      capArray = (const SVGA3dFifoCapPair *) capsRecord->pairs;
      numCaps = (int) ((capsRecord->header.length * sizeof(uint32) -
                        sizeof capsRecord->header) / (2 * sizeof(uint32)));

      for (i = 0; i < numCaps; i++) {
         index = capArray[i][0];
         if (index < vws->ioctl.num_cap_3d) {
            vws->ioctl.cap_3d[index].has_cap = true;
            vws->ioctl.cap_3d[index].result.u = capArray[i][1];
         } else {
            debug_printf("Unknown devcaps seen: %d\n", index);
         }
      }
   }
   return 0;
}
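
/*
 * Caps record layout assumed by the parser above (pre-GB FIFO caps,
 * illustrative sketch, not upstream code):
 *
 *   uint32 header.length;      // record length in uint32 units, incl. header
 *   uint32 header.type;        // record type, compared above
 *   SVGA3dFifoCapPair pairs[]; // { cap index, cap value } pairs
 *
 * A record with header.length == 2 + 2*N therefore holds N cap pairs,
 * matching the numCaps computation in vmw_ioctl_parse_caps().
 */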

bool
vmw_ioctl_init(struct vmw_winsys_screen *vws)
{
   struct drm_vmw_getparam_arg gp_arg;
   struct drm_vmw_get_3d_cap_arg cap_arg;
   unsigned int size;
   int ret;
   uint32_t *cap_buffer;
   drmVersionPtr version;
   bool drm_gb_capable;
   bool have_drm_2_5;
   const char *getenv_val;

   VMW_FUNC;

   version = drmGetVersion(vws->ioctl.drm_fd);
   if (!version)
      goto out_no_version;

   have_drm_2_5 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 4);
   vws->ioctl.have_drm_2_6 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 5);
   vws->ioctl.have_drm_2_9 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 8);
   vws->ioctl.have_drm_2_15 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 14);
   vws->ioctl.have_drm_2_16 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 15);
   vws->ioctl.have_drm_2_17 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 16);
   vws->ioctl.have_drm_2_18 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 17);
   vws->ioctl.have_drm_2_19 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 18);
   vws->ioctl.have_drm_2_20 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 19);

   vws->ioctl.drm_execbuf_version = vws->ioctl.have_drm_2_9 ? 2 : 1;

   drm_gb_capable = have_drm_2_5;

   memset(&gp_arg, 0, sizeof(gp_arg));
   gp_arg.param = DRM_VMW_PARAM_3D;
   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                             &gp_arg, sizeof(gp_arg));
   if (ret || gp_arg.value == 0) {
      vmw_error("No 3D enabled (%i, %s).\n", ret, strerror(-ret));
      goto out_no_3d;
   }

   memset(&gp_arg, 0, sizeof(gp_arg));
   gp_arg.param = DRM_VMW_PARAM_FIFO_HW_VERSION;
   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                             &gp_arg, sizeof(gp_arg));
   if (ret) {
      vmw_error("Failed to get fifo hw version (%i, %s).\n",
                ret, strerror(-ret));
      goto out_no_3d;
   }
   vws->ioctl.hwversion = gp_arg.value;
   getenv_val = getenv("SVGA_FORCE_HOST_BACKED");
   if (!getenv_val || strcmp(getenv_val, "0") == 0) {
      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_HW_CAPS;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                &gp_arg, sizeof(gp_arg));
   } else {
      ret = -EINVAL;
   }
   if (ret)
      vws->base.have_gb_objects = false;
   else
      vws->base.have_gb_objects =
         !!(gp_arg.value & (uint64_t) SVGA_CAP_GBOBJECTS);

   if (vws->base.have_gb_objects && !drm_gb_capable)
      goto out_no_3d;

   vws->base.have_vgpu10 = false;
   vws->base.have_sm4_1 = false;
   vws->base.have_intra_surface_copy = false;

   memset(&gp_arg, 0, sizeof(gp_arg));
   gp_arg.param = DRM_VMW_PARAM_DEVICE_ID;
   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                             &gp_arg, sizeof(gp_arg));
   if (ret || gp_arg.value == 0) {
      vws->base.device_id = 0x0405; /* assume SVGA II */
   } else {
      vws->base.device_id = gp_arg.value;
   }

   if (vws->base.have_gb_objects) {
      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_MAX_MOB_MEMORY;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                &gp_arg, sizeof(gp_arg));
      if (ret) {
         /* Just guess a large enough value. */
         vws->ioctl.max_mob_memory = 256*1024*1024;
      } else {
         vws->ioctl.max_mob_memory = gp_arg.value;
      }

      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_MAX_MOB_SIZE;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                &gp_arg, sizeof(gp_arg));

      if (ret || gp_arg.value == 0) {
         vws->ioctl.max_texture_size = VMW_MAX_DEFAULT_TEXTURE_SIZE;
      } else {
         vws->ioctl.max_texture_size = gp_arg.value;
      }

      /* Never early flush surfaces, mobs do accounting. */
      vws->ioctl.max_surface_memory = -1;

      if (vws->ioctl.have_drm_2_9) {
         memset(&gp_arg, 0, sizeof(gp_arg));
         gp_arg.param = DRM_VMW_PARAM_DX;
         ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                   &gp_arg, sizeof(gp_arg));
         if (ret == 0 && gp_arg.value != 0) {
            const char *vgpu10_val;

            debug_printf("Have VGPU10 interface and hardware.\n");
            vws->base.have_vgpu10 = true;
            vgpu10_val = getenv("SVGA_VGPU10");
            if (vgpu10_val && strcmp(vgpu10_val, "0") == 0) {
               debug_printf("Disabling VGPU10 interface.\n");
               vws->base.have_vgpu10 = false;
            } else {
               debug_printf("Enabling VGPU10 interface.\n");
            }
         }
      }

      if (vws->ioctl.have_drm_2_15 && vws->base.have_vgpu10) {
         memset(&gp_arg, 0, sizeof(gp_arg));
         gp_arg.param = DRM_VMW_PARAM_HW_CAPS2;
         ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                   &gp_arg, sizeof(gp_arg));
         if (ret == 0 && gp_arg.value != 0) {
            vws->base.have_intra_surface_copy = true;
         }

         memset(&gp_arg, 0, sizeof(gp_arg));
         gp_arg.param = DRM_VMW_PARAM_SM4_1;
         ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                   &gp_arg, sizeof(gp_arg));
         if (ret == 0 && gp_arg.value != 0) {
            vws->base.have_sm4_1 = true;
         }
      }

      if (vws->ioctl.have_drm_2_18 && vws->base.have_sm4_1) {
         memset(&gp_arg, 0, sizeof(gp_arg));
         gp_arg.param = DRM_VMW_PARAM_SM5;
         ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                   &gp_arg, sizeof(gp_arg));
         if (ret == 0 && gp_arg.value != 0) {
            vws->base.have_sm5 = true;
         }
      }

      if (vws->ioctl.have_drm_2_20 && vws->base.have_sm5) {
         memset(&gp_arg, 0, sizeof(gp_arg));
         gp_arg.param = DRM_VMW_PARAM_GL43;
         ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                   &gp_arg, sizeof(gp_arg));
         if (ret == 0 && gp_arg.value != 0) {
            vws->base.have_gl43 = true;
         }
      }

      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_3D_CAPS_SIZE;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                &gp_arg, sizeof(gp_arg));
      if (ret)
         size = SVGA_FIFO_3D_CAPS_SIZE * sizeof(uint32_t);
      else
         size = gp_arg.value;

      if (vws->base.have_gb_objects)
         vws->ioctl.num_cap_3d = size / sizeof(uint32_t);
      else
         vws->ioctl.num_cap_3d = SVGA3D_DEVCAP_MAX;

      if (vws->ioctl.have_drm_2_16) {
         vws->base.have_coherent = true;
         getenv_val = getenv("SVGA_FORCE_COHERENT");
         if (getenv_val && strcmp(getenv_val, "0") != 0)
            vws->force_coherent = true;
      }
   } else {
      vws->ioctl.num_cap_3d = SVGA3D_DEVCAP_MAX;

      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_MAX_SURF_MEMORY;
      if (have_drm_2_5)
         ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                   &gp_arg, sizeof(gp_arg));
      if (!have_drm_2_5 || ret) {
         /* Just guess a large enough value, around 800mb. */
         vws->ioctl.max_surface_memory = 0x30000000;
      } else {
         vws->ioctl.max_surface_memory = gp_arg.value;
      }

      vws->ioctl.max_texture_size = VMW_MAX_DEFAULT_TEXTURE_SIZE;

      size = SVGA_FIFO_3D_CAPS_SIZE * sizeof(uint32_t);
   }

   /* Userspace surfaces are only supported on guest-backed hardware */
   vws->userspace_surface = false;
   getenv_val = getenv("VMW_SVGA_USERSPACE_SURFACE");
   if (getenv_val && atoi(getenv_val)) {
      assert(vws->base.have_gb_objects);
      assert(vws->base.have_vgpu10);
      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_USER_SRF;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM, &gp_arg,
                                sizeof(gp_arg));
      if (!ret && gp_arg.value == true) {
         vws->userspace_surface = true;
         debug_printf("Using userspace managed surfaces\n");
      }
   }

   debug_printf("VGPU10 interface is %s.\n",
                vws->base.have_vgpu10 ? "on" : "off");

   cap_buffer = calloc(1, size);
   if (!cap_buffer) {
      debug_printf("Failed alloc fifo 3D caps buffer.\n");
      goto out_no_3d;
   }

   vws->ioctl.cap_3d = calloc(vws->ioctl.num_cap_3d,
                              sizeof(*vws->ioctl.cap_3d));
   if (!vws->ioctl.cap_3d) {
      debug_printf("Failed alloc fifo 3D caps buffer.\n");
      goto out_no_caparray;
   }

   memset(&cap_arg, 0, sizeof(cap_arg));
   cap_arg.buffer = (uint64_t) (unsigned long) (cap_buffer);
   cap_arg.max_size = size;

   /*
    * This call must always be made after DRM_VMW_PARAM_MAX_MOB_MEMORY and
    * DRM_VMW_PARAM_SM4_1, since the kernel driver uses those queries to
    * decide which caps to send.
    */
   ret = drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_GET_3D_CAP,
                         &cap_arg, sizeof(cap_arg));

   if (ret) {
      debug_printf("Failed to get 3D capabilities"
                   " (%i, %s).\n", ret, strerror(-ret));
      goto out_no_caps;
   }

   ret = vmw_ioctl_parse_caps(vws, cap_buffer);
   if (ret) {
      debug_printf("Failed to parse 3D capabilities"
                   " (%i, %s).\n", ret, strerror(-ret));
      goto out_no_caps;
   }

   if (((version->version_major == 2 && version->version_minor >= 10)
        || version->version_major > 2) && vws->base.have_vgpu10) {

      /* Support for these commands didn't make it into vmwgfx kernel
       * modules before 2.10.
       */
      vws->base.have_generate_mipmap_cmd = true;
      vws->base.have_set_predication_cmd = true;
   }

   if (version->version_major == 2 && version->version_minor >= 14) {
      vws->base.have_fence_fd = true;
   }

   free(cap_buffer);
   drmFreeVersion(version);
   vmw_printf("%s OK\n", __func__);
   return true;
out_no_caps:
   free(vws->ioctl.cap_3d);
out_no_caparray:
   free(cap_buffer);
out_no_3d:
   drmFreeVersion(version);
out_no_version:
   vws->ioctl.num_cap_3d = 0;
   debug_printf("%s Failed\n", __func__);
   return false;
}



void
vmw_ioctl_cleanup(struct vmw_winsys_screen *vws)
{
   VMW_FUNC;

   free(vws->ioctl.cap_3d);
}
