1 /**********************************************************
2 * Copyright 2009-2015 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26 /**
27 * @file
28 *
29 * Wrappers for DRM ioctl functionlaity used by the rest of the vmw
30 * drm winsys.
31 *
32 * Based on svgaicd_escape.c
33 */
34
35
36 #include "svga_cmd.h"
37 #include "util/u_memory.h"
38 #include "util/u_math.h"
39 #include "svgadump/svga_dump.h"
40 #include "state_tracker/drm_driver.h"
41 #include "vmw_screen.h"
42 #include "vmw_context.h"
43 #include "vmw_fence.h"
44 #include "xf86drm.h"
45 #include "vmwgfx_drm.h"
46 #include "svga3d_caps.h"
47 #include "svga3d_reg.h"
48
49 #include "os/os_mman.h"
50
51 #include <errno.h>
52 #include <unistd.h>
53
54 #define VMW_MAX_DEFAULT_TEXTURE_SIZE (128 * 1024 * 1024)
55 #define VMW_FENCE_TIMEOUT_SECONDS 60
56
/**
 * struct vmw_region - Userspace representation of a kernel buffer object.
 *
 * @handle: Kernel handle of the buffer object.
 * @map_handle: Offset used when mmapping the buffer through @drm_fd.
 * @data: Cached CPU mapping of the buffer, or NULL when unmapped.
 * @map_count: Number of outstanding map requests. Mappings are cached:
 * unmap only decrements this count and the mapping is torn down at
 * region destruction.
 * @drm_fd: File descriptor of the drm device the buffer belongs to.
 * @size: Size of the buffer object in bytes.
 */
struct vmw_region
{
   uint32_t handle;
   uint64_t map_handle;
   void *data;
   uint32_t map_count;
   int drm_fd;
   uint32_t size;
};
66
67 uint32_t
vmw_region_size(struct vmw_region * region)68 vmw_region_size(struct vmw_region *region)
69 {
70 return region->size;
71 }
72
73 #if defined(__DragonFly__) || defined(__FreeBSD__) || \
74 defined(__NetBSD__) || defined(__OpenBSD__)
75 #define ERESTART EINTR
76 #endif
77
78 uint32
vmw_ioctl_context_create(struct vmw_winsys_screen * vws)79 vmw_ioctl_context_create(struct vmw_winsys_screen *vws)
80 {
81 struct drm_vmw_context_arg c_arg;
82 int ret;
83
84 VMW_FUNC;
85
86 ret = drmCommandRead(vws->ioctl.drm_fd, DRM_VMW_CREATE_CONTEXT,
87 &c_arg, sizeof(c_arg));
88
89 if (ret)
90 return -1;
91
92 vmw_printf("Context id is %d\n", c_arg.cid);
93 return c_arg.cid;
94 }
95
96 uint32
vmw_ioctl_extended_context_create(struct vmw_winsys_screen * vws,boolean vgpu10)97 vmw_ioctl_extended_context_create(struct vmw_winsys_screen *vws,
98 boolean vgpu10)
99 {
100 union drm_vmw_extended_context_arg c_arg;
101 int ret;
102
103 VMW_FUNC;
104 memset(&c_arg, 0, sizeof(c_arg));
105 c_arg.req = (vgpu10 ? drm_vmw_context_vgpu10 : drm_vmw_context_legacy);
106 ret = drmCommandWriteRead(vws->ioctl.drm_fd,
107 DRM_VMW_CREATE_EXTENDED_CONTEXT,
108 &c_arg, sizeof(c_arg));
109
110 if (ret)
111 return -1;
112
113 vmw_printf("Context id is %d\n", c_arg.cid);
114 return c_arg.rep.cid;
115 }
116
117 void
vmw_ioctl_context_destroy(struct vmw_winsys_screen * vws,uint32 cid)118 vmw_ioctl_context_destroy(struct vmw_winsys_screen *vws, uint32 cid)
119 {
120 struct drm_vmw_context_arg c_arg;
121
122 VMW_FUNC;
123
124 memset(&c_arg, 0, sizeof(c_arg));
125 c_arg.cid = cid;
126
127 (void)drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_UNREF_CONTEXT,
128 &c_arg, sizeof(c_arg));
129
130 }
131
132 uint32
vmw_ioctl_surface_create(struct vmw_winsys_screen * vws,SVGA3dSurfaceFlags flags,SVGA3dSurfaceFormat format,unsigned usage,SVGA3dSize size,uint32_t numFaces,uint32_t numMipLevels,unsigned sampleCount)133 vmw_ioctl_surface_create(struct vmw_winsys_screen *vws,
134 SVGA3dSurfaceFlags flags,
135 SVGA3dSurfaceFormat format,
136 unsigned usage,
137 SVGA3dSize size,
138 uint32_t numFaces, uint32_t numMipLevels,
139 unsigned sampleCount)
140 {
141 union drm_vmw_surface_create_arg s_arg;
142 struct drm_vmw_surface_create_req *req = &s_arg.req;
143 struct drm_vmw_surface_arg *rep = &s_arg.rep;
144 struct drm_vmw_size sizes[DRM_VMW_MAX_SURFACE_FACES*
145 DRM_VMW_MAX_MIP_LEVELS];
146 struct drm_vmw_size *cur_size;
147 uint32_t iFace;
148 uint32_t iMipLevel;
149 int ret;
150
151 vmw_printf("%s flags %d format %d\n", __FUNCTION__, flags, format);
152
153 memset(&s_arg, 0, sizeof(s_arg));
154 req->flags = (uint32_t) flags;
155 req->scanout = !!(usage & SVGA_SURFACE_USAGE_SCANOUT);
156 req->format = (uint32_t) format;
157 req->shareable = !!(usage & SVGA_SURFACE_USAGE_SHARED);
158
159 assert(numFaces * numMipLevels < DRM_VMW_MAX_SURFACE_FACES*
160 DRM_VMW_MAX_MIP_LEVELS);
161 cur_size = sizes;
162 for (iFace = 0; iFace < numFaces; ++iFace) {
163 SVGA3dSize mipSize = size;
164
165 req->mip_levels[iFace] = numMipLevels;
166 for (iMipLevel = 0; iMipLevel < numMipLevels; ++iMipLevel) {
167 cur_size->width = mipSize.width;
168 cur_size->height = mipSize.height;
169 cur_size->depth = mipSize.depth;
170 mipSize.width = MAX2(mipSize.width >> 1, 1);
171 mipSize.height = MAX2(mipSize.height >> 1, 1);
172 mipSize.depth = MAX2(mipSize.depth >> 1, 1);
173 cur_size++;
174 }
175 }
176 for (iFace = numFaces; iFace < SVGA3D_MAX_SURFACE_FACES; ++iFace) {
177 req->mip_levels[iFace] = 0;
178 }
179
180 req->size_addr = (unsigned long)&sizes;
181
182 ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_CREATE_SURFACE,
183 &s_arg, sizeof(s_arg));
184
185 if (ret)
186 return -1;
187
188 vmw_printf("Surface id is %d\n", rep->sid);
189
190 return rep->sid;
191 }
192
193
/**
 * vmw_ioctl_gb_surface_create - Create a guest-backed surface.
 *
 * @vws: Winsys screen to create the surface on.
 * @flags: SVGA3d surface flags.
 * @format: SVGA3d surface format.
 * @usage: SVGA_SURFACE_USAGE_* bits; SCANOUT and SHARED are honored here.
 * @size: Base mip level dimensions.
 * @numFaces: Number of faces; used as array size on vgpu10 devices.
 * @numMipLevels: Number of mip levels.
 * @sampleCount: Multisample count; only forwarded on vgpu10 devices.
 * @buffer_handle: Kernel handle of an existing backup buffer to reuse,
 * or 0 to have the kernel create one.
 * @p_region: If non-NULL, on success points to a newly allocated
 * struct vmw_region describing the surface's backup buffer. Ownership
 * transfers to the caller.
 *
 * Returns the surface handle on success, SVGA3D_INVALID_ID on failure.
 */
uint32
vmw_ioctl_gb_surface_create(struct vmw_winsys_screen *vws,
                            SVGA3dSurfaceFlags flags,
                            SVGA3dSurfaceFormat format,
                            unsigned usage,
                            SVGA3dSize size,
                            uint32_t numFaces,
                            uint32_t numMipLevels,
                            unsigned sampleCount,
                            uint32_t buffer_handle,
                            struct vmw_region **p_region)
{
   union drm_vmw_gb_surface_create_arg s_arg;
   struct drm_vmw_gb_surface_create_req *req = &s_arg.req;
   struct drm_vmw_gb_surface_create_rep *rep = &s_arg.rep;
   struct vmw_region *region = NULL;
   int ret;

   vmw_printf("%s flags %d format %d\n", __FUNCTION__, flags, format);

   /* Allocate the region up front so the only failure after the ioctl
    * succeeds is impossible (no error path would leak the kernel surface).
    */
   if (p_region) {
      region = CALLOC_STRUCT(vmw_region);
      if (!region)
         return SVGA3D_INVALID_ID;
   }

   memset(&s_arg, 0, sizeof(s_arg));
   req->svga3d_flags = (uint32_t) flags;
   if (usage & SVGA_SURFACE_USAGE_SCANOUT)
      req->drm_surface_flags |= drm_vmw_surface_flag_scanout;
   req->format = (uint32_t) format;
   if (usage & SVGA_SURFACE_USAGE_SHARED)
      req->drm_surface_flags |= drm_vmw_surface_flag_shareable;
   /* Always ask the kernel for a backup buffer handle in the reply. */
   req->drm_surface_flags |= drm_vmw_surface_flag_create_buffer;
   req->base_size.width = size.width;
   req->base_size.height = size.height;
   req->base_size.depth = size.depth;
   req->mip_levels = numMipLevels;
   req->multisample_count = 0;
   req->autogen_filter = SVGA3D_TEX_FILTER_NONE;

   if (vws->base.have_vgpu10) {
      req->array_size = numFaces;
      req->multisample_count = sampleCount;
   } else {
      assert(numFaces * numMipLevels < DRM_VMW_MAX_SURFACE_FACES*
	     DRM_VMW_MAX_MIP_LEVELS);
      req->array_size = 0;
   }

   if (buffer_handle)
      req->buffer_handle = buffer_handle;
   else
      req->buffer_handle = SVGA3D_INVALID_ID;

   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GB_SURFACE_CREATE,
			     &s_arg, sizeof(s_arg));

   if (ret)
      goto out_fail_create;

   /* Hand the caller a region wrapping the kernel-reported backup buffer. */
   if (p_region) {
      region->handle = rep->buffer_handle;
      region->map_handle = rep->buffer_map_handle;
      region->drm_fd = vws->ioctl.drm_fd;
      region->size = rep->backup_size;
      *p_region = region;
   }

   vmw_printf("Surface id is %d\n", rep->sid);
   return rep->handle;

out_fail_create:
   FREE(region);
   return SVGA3D_INVALID_ID;
}
270
271 /**
272 * vmw_ioctl_surface_req - Fill in a struct surface_req
273 *
274 * @vws: Winsys screen
275 * @whandle: Surface handle
276 * @req: The struct surface req to fill in
277 * @needs_unref: This call takes a kernel surface reference that needs to
278 * be unreferenced.
279 *
280 * Returns 0 on success, negative error type otherwise.
281 * Fills in the surface_req structure according to handle type and kernel
282 * capabilities.
283 */
284 static int
vmw_ioctl_surface_req(const struct vmw_winsys_screen * vws,const struct winsys_handle * whandle,struct drm_vmw_surface_arg * req,boolean * needs_unref)285 vmw_ioctl_surface_req(const struct vmw_winsys_screen *vws,
286 const struct winsys_handle *whandle,
287 struct drm_vmw_surface_arg *req,
288 boolean *needs_unref)
289 {
290 int ret;
291
292 switch(whandle->type) {
293 case DRM_API_HANDLE_TYPE_SHARED:
294 case DRM_API_HANDLE_TYPE_KMS:
295 *needs_unref = FALSE;
296 req->handle_type = DRM_VMW_HANDLE_LEGACY;
297 req->sid = whandle->handle;
298 break;
299 case DRM_API_HANDLE_TYPE_FD:
300 if (!vws->ioctl.have_drm_2_6) {
301 uint32_t handle;
302
303 ret = drmPrimeFDToHandle(vws->ioctl.drm_fd, whandle->handle, &handle);
304 if (ret) {
305 vmw_error("Failed to get handle from prime fd %d.\n",
306 (int) whandle->handle);
307 return -EINVAL;
308 }
309
310 *needs_unref = TRUE;
311 req->handle_type = DRM_VMW_HANDLE_LEGACY;
312 req->sid = handle;
313 } else {
314 *needs_unref = FALSE;
315 req->handle_type = DRM_VMW_HANDLE_PRIME;
316 req->sid = whandle->handle;
317 }
318 break;
319 default:
320 vmw_error("Attempt to import unsupported handle type %d.\n",
321 whandle->type);
322 return -EINVAL;
323 }
324
325 return 0;
326 }
327
/**
 * vmw_ioctl_gb_surface_ref - Put a reference on a guest-backed surface and
 * get surface information
 *
 * @vws: Screen to register the reference on
 * @whandle: Winsys handle identifying the guest-backed surface
 * @flags: Returns the flags used when the surface was created
 * @format: Returns the format used when the surface was created
 * @numMipLevels: Returns the number of mipmap levels of the surface
 * @handle: Returns the kernel handle of the referenced surface
 * @p_region: On successful return points to a newly allocated
 * struct vmw_region holding a reference to the surface backup buffer.
 * Ownership transfers to the caller.
 *
 * Returns 0 on success, a negative error code on failure.
 */
int
vmw_ioctl_gb_surface_ref(struct vmw_winsys_screen *vws,
                         const struct winsys_handle *whandle,
                         SVGA3dSurfaceFlags *flags,
                         SVGA3dSurfaceFormat *format,
                         uint32_t *numMipLevels,
                         uint32_t *handle,
                         struct vmw_region **p_region)
{
   union drm_vmw_gb_surface_reference_arg s_arg;
   struct drm_vmw_surface_arg *req = &s_arg.req;
   struct drm_vmw_gb_surface_ref_rep *rep = &s_arg.rep;
   struct vmw_region *region = NULL;
   boolean needs_unref = FALSE;
   int ret;

   vmw_printf("%s flags %d format %d\n", __FUNCTION__, flags, format);

   assert(p_region != NULL);
   /* Allocate before the ioctls so the error paths stay simple. */
   region = CALLOC_STRUCT(vmw_region);
   if (!region)
      return -ENOMEM;

   memset(&s_arg, 0, sizeof(s_arg));
   /* May take a temporary kernel reference (needs_unref) when importing
    * through a prime fd on older kernels.
    */
   ret = vmw_ioctl_surface_req(vws, whandle, req, &needs_unref);
   if (ret)
      goto out_fail_req;

   *handle = req->sid;
   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GB_SURFACE_REF,
			     &s_arg, sizeof(s_arg));

   if (ret)
      goto out_fail_ref;

   /* Wrap the backup buffer reported by the kernel in a region. */
   region->handle = rep->crep.buffer_handle;
   region->map_handle = rep->crep.buffer_map_handle;
   region->drm_fd = vws->ioctl.drm_fd;
   region->size = rep->crep.backup_size;
   *p_region = region;

   *handle = rep->crep.handle;
   *flags = rep->creq.svga3d_flags;
   *format = rep->creq.format;
   *numMipLevels = rep->creq.mip_levels;

   /* Drop the temporary reference taken by vmw_ioctl_surface_req;
    * the GB_SURFACE_REF ioctl above took its own reference.
    */
   if (needs_unref)
      vmw_ioctl_surface_destroy(vws, *handle);

   return 0;
out_fail_ref:
   if (needs_unref)
      vmw_ioctl_surface_destroy(vws, *handle);
out_fail_req:
   FREE(region);
   return ret;
}
399
400 void
vmw_ioctl_surface_destroy(struct vmw_winsys_screen * vws,uint32 sid)401 vmw_ioctl_surface_destroy(struct vmw_winsys_screen *vws, uint32 sid)
402 {
403 struct drm_vmw_surface_arg s_arg;
404
405 VMW_FUNC;
406
407 memset(&s_arg, 0, sizeof(s_arg));
408 s_arg.sid = sid;
409
410 (void)drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_UNREF_SURFACE,
411 &s_arg, sizeof(s_arg));
412 }
413
/**
 * vmw_ioctl_command - Submit a command buffer to the kernel for execution.
 *
 * @vws: Winsys screen to submit on.
 * @cid: Context id; only forwarded to the kernel on vgpu10 devices.
 * @throttle_us: Throttle value in microseconds, forwarded to the kernel.
 * @commands: Pointer to the command buffer.
 * @size: Size of the command buffer in bytes.
 * @pfence: If non-NULL, returns a fence for the submission, or NULL if
 * the kernel already synced or fence creation failed (after syncing).
 * @imported_fence_fd: Fence fd to wait on before execution, or -1 for none.
 * @flags: SVGA_HINT_FLAG_* bits; EXPORT_FENCE_FD requests a fence fd.
 *
 * Aborts the process if the execbuf ioctl itself fails.
 */
void
vmw_ioctl_command(struct vmw_winsys_screen *vws, int32_t cid,
		  uint32_t throttle_us, void *commands, uint32_t size,
		  struct pipe_fence_handle **pfence, int32_t imported_fence_fd,
		  uint32_t flags)
{
   struct drm_vmw_execbuf_arg arg;
   struct drm_vmw_fence_rep rep;
   int ret;
   int argsize;

#ifdef DEBUG
   /* Optional debug aids: dump (SVGA_DUMP_CMD) or drop (SVGA_SKIP_CMD)
    * the command stream; environment is read once on first call.
    */
   {
      static boolean firsttime = TRUE;
      static boolean debug = FALSE;
      static boolean skip = FALSE;
      if (firsttime) {
         debug = debug_get_bool_option("SVGA_DUMP_CMD", FALSE);
         skip = debug_get_bool_option("SVGA_SKIP_CMD", FALSE);
      }
      if (debug) {
         VMW_FUNC;
         svga_dump_commands(commands, size);
      }
      firsttime = FALSE;
      if (skip) {
         size = 0;
      }
   }
#endif

   memset(&arg, 0, sizeof(arg));
   memset(&rep, 0, sizeof(rep));

   if (flags & SVGA_HINT_FLAG_EXPORT_FENCE_FD) {
      arg.flags |= DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD;
   }

   if (imported_fence_fd != -1) {
      arg.flags |= DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD;
   }

   /* If the kernel doesn't overwrite rep, treat it as "no fence". */
   rep.error = -EFAULT;
   if (pfence)
      arg.fence_rep = (unsigned long)&rep;
   arg.commands = (unsigned long)commands;
   arg.command_size = size;
   arg.throttle_us = throttle_us;
   arg.version = vws->ioctl.drm_execbuf_version;
   arg.context_handle = (vws->base.have_vgpu10 ? cid : SVGA3D_INVALID_ID);

   /* Older DRM module requires this to be zero */
   if (vws->base.have_fence_fd)
      arg.imported_fence_fd = imported_fence_fd;

   /* In DRM_VMW_EXECBUF_VERSION 1, the drm_vmw_execbuf_arg structure ends with
    * the flags field. The structure size sent to drmCommandWrite must match
    * the drm_execbuf_version. Otherwise, an invalid value will be returned.
    */
   argsize = vws->ioctl.drm_execbuf_version > 1 ? sizeof(arg) :
      offsetof(struct drm_vmw_execbuf_arg, context_handle);
   /* Retry submissions interrupted by signals. */
   do {
       ret = drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_EXECBUF, &arg, argsize);
   } while(ret == -ERESTART);
   if (ret) {
      vmw_error("%s error %s.\n", __FUNCTION__, strerror(-ret));
      abort();
   }

   if (rep.error) {

      /*
       * Kernel has already synced, or caller requested no fence.
       */
      if (pfence)
	 *pfence = NULL;
   } else {
      if (pfence) {
         /* Let cached fence objects learn about the newly passed seqno. */
         vmw_fences_signal(vws->fence_ops, rep.passed_seqno, rep.seqno,
                           TRUE);

         /* Older DRM module will set this to zero, but -1 is the proper FD
          * to use for no Fence FD support */
         if (!vws->base.have_fence_fd)
            rep.fd = -1;

	 *pfence = vmw_fence_create(vws->fence_ops, rep.handle,
				    rep.seqno, rep.mask, rep.fd);
	 if (*pfence == NULL) {
	    /*
	     * Fence creation failed. Need to sync.
	     */
	    (void) vmw_ioctl_fence_finish(vws, rep.handle, rep.mask);
	    vmw_ioctl_fence_unref(vws, rep.handle);
	 }
      }
   }
}
512
513
514 struct vmw_region *
vmw_ioctl_region_create(struct vmw_winsys_screen * vws,uint32_t size)515 vmw_ioctl_region_create(struct vmw_winsys_screen *vws, uint32_t size)
516 {
517 struct vmw_region *region;
518 union drm_vmw_alloc_dmabuf_arg arg;
519 struct drm_vmw_alloc_dmabuf_req *req = &arg.req;
520 struct drm_vmw_dmabuf_rep *rep = &arg.rep;
521 int ret;
522
523 vmw_printf("%s: size = %u\n", __FUNCTION__, size);
524
525 region = CALLOC_STRUCT(vmw_region);
526 if (!region)
527 goto out_err1;
528
529 memset(&arg, 0, sizeof(arg));
530 req->size = size;
531 do {
532 ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_ALLOC_DMABUF, &arg,
533 sizeof(arg));
534 } while (ret == -ERESTART);
535
536 if (ret) {
537 vmw_error("IOCTL failed %d: %s\n", ret, strerror(-ret));
538 goto out_err1;
539 }
540
541 region->data = NULL;
542 region->handle = rep->handle;
543 region->map_handle = rep->map_handle;
544 region->map_count = 0;
545 region->size = size;
546 region->drm_fd = vws->ioctl.drm_fd;
547
548 vmw_printf(" gmrId = %u, offset = %u\n",
549 region->ptr.gmrId, region->ptr.offset);
550
551 return region;
552
553 out_err1:
554 FREE(region);
555 return NULL;
556 }
557
558 void
vmw_ioctl_region_destroy(struct vmw_region * region)559 vmw_ioctl_region_destroy(struct vmw_region *region)
560 {
561 struct drm_vmw_unref_dmabuf_arg arg;
562
563 vmw_printf("%s: gmrId = %u, offset = %u\n", __FUNCTION__,
564 region->ptr.gmrId, region->ptr.offset);
565
566 if (region->data) {
567 os_munmap(region->data, region->size);
568 region->data = NULL;
569 }
570
571 memset(&arg, 0, sizeof(arg));
572 arg.handle = region->handle;
573 drmCommandWrite(region->drm_fd, DRM_VMW_UNREF_DMABUF, &arg, sizeof(arg));
574
575 FREE(region);
576 }
577
578 SVGAGuestPtr
vmw_ioctl_region_ptr(struct vmw_region * region)579 vmw_ioctl_region_ptr(struct vmw_region *region)
580 {
581 SVGAGuestPtr ptr = {region->handle, 0};
582 return ptr;
583 }
584
585 void *
vmw_ioctl_region_map(struct vmw_region * region)586 vmw_ioctl_region_map(struct vmw_region *region)
587 {
588 void *map;
589
590 vmw_printf("%s: gmrId = %u, offset = %u\n", __FUNCTION__,
591 region->ptr.gmrId, region->ptr.offset);
592
593 if (region->data == NULL) {
594 map = os_mmap(NULL, region->size, PROT_READ | PROT_WRITE, MAP_SHARED,
595 region->drm_fd, region->map_handle);
596 if (map == MAP_FAILED) {
597 vmw_error("%s: Map failed.\n", __FUNCTION__);
598 return NULL;
599 }
600
601 region->data = map;
602 }
603
604 ++region->map_count;
605
606 return region->data;
607 }
608
609 void
vmw_ioctl_region_unmap(struct vmw_region * region)610 vmw_ioctl_region_unmap(struct vmw_region *region)
611 {
612 vmw_printf("%s: gmrId = %u, offset = %u\n", __FUNCTION__,
613 region->ptr.gmrId, region->ptr.offset);
614 --region->map_count;
615 }
616
617 /**
618 * vmw_ioctl_syncforcpu - Synchronize a buffer object for CPU usage
619 *
620 * @region: Pointer to a struct vmw_region representing the buffer object.
621 * @dont_block: Dont wait for GPU idle, but rather return -EBUSY if the
622 * GPU is busy with the buffer object.
623 * @readonly: Hint that the CPU access is read-only.
624 * @allow_cs: Allow concurrent command submission while the buffer is
625 * synchronized for CPU. If FALSE command submissions referencing the
626 * buffer will block until a corresponding call to vmw_ioctl_releasefromcpu.
627 *
628 * This function idles any GPU activities touching the buffer and blocks
629 * command submission of commands referencing the buffer, even from
630 * other processes.
631 */
632 int
vmw_ioctl_syncforcpu(struct vmw_region * region,boolean dont_block,boolean readonly,boolean allow_cs)633 vmw_ioctl_syncforcpu(struct vmw_region *region,
634 boolean dont_block,
635 boolean readonly,
636 boolean allow_cs)
637 {
638 struct drm_vmw_synccpu_arg arg;
639
640 memset(&arg, 0, sizeof(arg));
641 arg.op = drm_vmw_synccpu_grab;
642 arg.handle = region->handle;
643 arg.flags = drm_vmw_synccpu_read;
644 if (!readonly)
645 arg.flags |= drm_vmw_synccpu_write;
646 if (dont_block)
647 arg.flags |= drm_vmw_synccpu_dontblock;
648 if (allow_cs)
649 arg.flags |= drm_vmw_synccpu_allow_cs;
650
651 return drmCommandWrite(region->drm_fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
652 }
653
654 /**
655 * vmw_ioctl_releasefromcpu - Undo a previous syncforcpu.
656 *
657 * @region: Pointer to a struct vmw_region representing the buffer object.
658 * @readonly: Should hold the same value as the matching syncforcpu call.
659 * @allow_cs: Should hold the same value as the matching syncforcpu call.
660 */
661 void
vmw_ioctl_releasefromcpu(struct vmw_region * region,boolean readonly,boolean allow_cs)662 vmw_ioctl_releasefromcpu(struct vmw_region *region,
663 boolean readonly,
664 boolean allow_cs)
665 {
666 struct drm_vmw_synccpu_arg arg;
667
668 memset(&arg, 0, sizeof(arg));
669 arg.op = drm_vmw_synccpu_release;
670 arg.handle = region->handle;
671 arg.flags = drm_vmw_synccpu_read;
672 if (!readonly)
673 arg.flags |= drm_vmw_synccpu_write;
674 if (allow_cs)
675 arg.flags |= drm_vmw_synccpu_allow_cs;
676
677 (void) drmCommandWrite(region->drm_fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
678 }
679
680 void
vmw_ioctl_fence_unref(struct vmw_winsys_screen * vws,uint32_t handle)681 vmw_ioctl_fence_unref(struct vmw_winsys_screen *vws,
682 uint32_t handle)
683 {
684 struct drm_vmw_fence_arg arg;
685 int ret;
686
687 memset(&arg, 0, sizeof(arg));
688 arg.handle = handle;
689
690 ret = drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_FENCE_UNREF,
691 &arg, sizeof(arg));
692 if (ret != 0)
693 vmw_error("%s Failed\n", __FUNCTION__);
694 }
695
696 static inline uint32_t
vmw_drm_fence_flags(uint32_t flags)697 vmw_drm_fence_flags(uint32_t flags)
698 {
699 uint32_t dflags = 0;
700
701 if (flags & SVGA_FENCE_FLAG_EXEC)
702 dflags |= DRM_VMW_FENCE_FLAG_EXEC;
703 if (flags & SVGA_FENCE_FLAG_QUERY)
704 dflags |= DRM_VMW_FENCE_FLAG_QUERY;
705
706 return dflags;
707 }
708
709
710 int
vmw_ioctl_fence_signalled(struct vmw_winsys_screen * vws,uint32_t handle,uint32_t flags)711 vmw_ioctl_fence_signalled(struct vmw_winsys_screen *vws,
712 uint32_t handle,
713 uint32_t flags)
714 {
715 struct drm_vmw_fence_signaled_arg arg;
716 uint32_t vflags = vmw_drm_fence_flags(flags);
717 int ret;
718
719 memset(&arg, 0, sizeof(arg));
720 arg.handle = handle;
721 arg.flags = vflags;
722
723 ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_FENCE_SIGNALED,
724 &arg, sizeof(arg));
725
726 if (ret != 0)
727 return ret;
728
729 vmw_fences_signal(vws->fence_ops, arg.passed_seqno, 0, FALSE);
730
731 return (arg.signaled) ? 0 : -1;
732 }
733
734
735
/**
 * vmw_ioctl_fence_finish - Wait for a fence object to signal.
 *
 * @vws: Winsys screen the fence belongs to.
 * @handle: Kernel handle of the fence.
 * @flags: SVGA_FENCE_FLAG_* bits to wait for.
 *
 * Waits up to VMW_FENCE_TIMEOUT_SECONDS.
 * NOTE(review): always returns 0, even when the wait ioctl fails (the
 * error is only logged) — presumably deliberate best-effort behavior;
 * confirm before changing, as callers may rely on it.
 */
int
vmw_ioctl_fence_finish(struct vmw_winsys_screen *vws,
                       uint32_t handle,
		       uint32_t flags)
{
   struct drm_vmw_fence_wait_arg arg;
   uint32_t vflags = vmw_drm_fence_flags(flags);
   int ret;

   memset(&arg, 0, sizeof(arg));

   arg.handle = handle;
   /* Timeout is specified to the kernel in microseconds. */
   arg.timeout_us = VMW_FENCE_TIMEOUT_SECONDS*1000000;
   arg.lazy = 0;
   arg.flags = vflags;

   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_FENCE_WAIT,
			     &arg, sizeof(arg));

   if (ret != 0)
      vmw_error("%s Failed\n", __FUNCTION__);

   return 0;
}
760
761 uint32
vmw_ioctl_shader_create(struct vmw_winsys_screen * vws,SVGA3dShaderType type,uint32 code_len)762 vmw_ioctl_shader_create(struct vmw_winsys_screen *vws,
763 SVGA3dShaderType type,
764 uint32 code_len)
765 {
766 struct drm_vmw_shader_create_arg sh_arg;
767 int ret;
768
769 VMW_FUNC;
770
771 memset(&sh_arg, 0, sizeof(sh_arg));
772
773 sh_arg.size = code_len;
774 sh_arg.buffer_handle = SVGA3D_INVALID_ID;
775 sh_arg.shader_handle = SVGA3D_INVALID_ID;
776 switch (type) {
777 case SVGA3D_SHADERTYPE_VS:
778 sh_arg.shader_type = drm_vmw_shader_type_vs;
779 break;
780 case SVGA3D_SHADERTYPE_PS:
781 sh_arg.shader_type = drm_vmw_shader_type_ps;
782 break;
783 default:
784 assert(!"Invalid shader type.");
785 break;
786 }
787
788 ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_CREATE_SHADER,
789 &sh_arg, sizeof(sh_arg));
790
791 if (ret)
792 return SVGA3D_INVALID_ID;
793
794 return sh_arg.shader_handle;
795 }
796
797 void
vmw_ioctl_shader_destroy(struct vmw_winsys_screen * vws,uint32 shid)798 vmw_ioctl_shader_destroy(struct vmw_winsys_screen *vws, uint32 shid)
799 {
800 struct drm_vmw_shader_arg sh_arg;
801
802 VMW_FUNC;
803
804 memset(&sh_arg, 0, sizeof(sh_arg));
805 sh_arg.handle = shid;
806
807 (void)drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_UNREF_SHADER,
808 &sh_arg, sizeof(sh_arg));
809
810 }
811
/**
 * vmw_ioctl_parse_caps - Parse a kernel-supplied 3D capability buffer into
 * the winsys cap_3d array.
 *
 * @vws: Winsys screen whose ioctl.cap_3d array is filled in.
 * @cap_buffer: Raw capability data returned by DRM_VMW_GET_3D_CAP.
 *
 * On guest-backed devices the buffer is a flat array of devcap values,
 * indexed directly. Otherwise the buffer is a chain of SVGA3dCapsRecords
 * holding (index, value) pairs.
 *
 * Returns 0 on success, -1 if no suitable caps record was found.
 */
static int
vmw_ioctl_parse_caps(struct vmw_winsys_screen *vws,
		     const uint32_t *cap_buffer)
{
   int i;

   if (vws->base.have_gb_objects) {
      /* Flat layout: one value per devcap index. */
      for (i = 0; i < vws->ioctl.num_cap_3d; ++i) {
	 vws->ioctl.cap_3d[i].has_cap = TRUE;
	 vws->ioctl.cap_3d[i].result.u = cap_buffer[i];
      }
      return 0;
   } else {
      const uint32 *capsBlock;
      const SVGA3dCapsRecord *capsRecord = NULL;
      uint32 offset;
      const SVGA3dCapPair *capArray;
      int numCaps, index;

      /*
       * Search linearly through the caps block records for the specified type.
       */
      /* Each record's first word is its length in uint32s; a zero length
       * terminates the chain. Pick the highest-typed DEVCAPS record.
       */
      capsBlock = (const uint32 *) cap_buffer;
      for (offset = 0; capsBlock[offset] != 0; offset += capsBlock[offset]) {
	 const SVGA3dCapsRecord *record;
	 assert(offset < SVGA_FIFO_3D_CAPS_SIZE);
	 record = (const SVGA3dCapsRecord *) (capsBlock + offset);
	 if ((record->header.type >= SVGA3DCAPS_RECORD_DEVCAPS_MIN) &&
	     (record->header.type <= SVGA3DCAPS_RECORD_DEVCAPS_MAX) &&
	     (!capsRecord || (record->header.type > capsRecord->header.type))) {
	    capsRecord = record;
	 }
      }

      if(!capsRecord)
	 return -1;

      /*
       * Calculate the number of caps from the size of the record.
       */
      capArray = (const SVGA3dCapPair *) capsRecord->data;
      numCaps = (int) ((capsRecord->header.length * sizeof(uint32) -
			sizeof capsRecord->header) / (2 * sizeof(uint32)));

      /* Store each (index, value) pair; skip indices the winsys doesn't
       * know about.
       */
      for (i = 0; i < numCaps; i++) {
	 index = capArray[i][0];
	 if (index < vws->ioctl.num_cap_3d) {
	    vws->ioctl.cap_3d[index].has_cap = TRUE;
	    vws->ioctl.cap_3d[index].result.u = capArray[i][1];
	 } else {
	    debug_printf("Unknown devcaps seen: %d\n", index);
	 }
      }
   }
   return 0;
}
868
/**
 * vmw_ioctl_init - Negotiate kernel capabilities and initialize the
 * winsys ioctl state.
 *
 * @vws: Winsys screen to initialize; fills in vws->ioctl and several
 * vws->base feature flags based on kernel version and device parameters.
 *
 * Returns TRUE on success, FALSE on failure (no 3D, allocation failure,
 * or kernel too old for the device's guest-backed mode).
 */
boolean
vmw_ioctl_init(struct vmw_winsys_screen *vws)
{
   struct drm_vmw_getparam_arg gp_arg;
   struct drm_vmw_get_3d_cap_arg cap_arg;
   unsigned int size;
   int ret;
   uint32_t *cap_buffer;
   drmVersionPtr version;
   boolean drm_gb_capable;
   boolean have_drm_2_5;

   VMW_FUNC;

   version = drmGetVersion(vws->ioctl.drm_fd);
   if (!version)
      goto out_no_version;

   /* Feature gates derived from the vmwgfx kernel module version. */
   have_drm_2_5 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 4);
   vws->ioctl.have_drm_2_6 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 5);
   vws->ioctl.have_drm_2_9 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 8);

   /* Execbuf ABI v2 (with context_handle) requires drm 2.9+. */
   vws->ioctl.drm_execbuf_version = vws->ioctl.have_drm_2_9 ? 2 : 1;

   /* Guest-backed objects need drm 2.5+. */
   drm_gb_capable = have_drm_2_5;

   memset(&gp_arg, 0, sizeof(gp_arg));
   gp_arg.param = DRM_VMW_PARAM_3D;
   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
			     &gp_arg, sizeof(gp_arg));
   if (ret || gp_arg.value == 0) {
      vmw_error("No 3D enabled (%i, %s).\n", ret, strerror(-ret));
      goto out_no_3d;
   }

   memset(&gp_arg, 0, sizeof(gp_arg));
   gp_arg.param = DRM_VMW_PARAM_FIFO_HW_VERSION;
   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
			     &gp_arg, sizeof(gp_arg));
   if (ret) {
      vmw_error("Failed to get fifo hw version (%i, %s).\n",
                ret, strerror(-ret));
      goto out_no_3d;
   }
   vws->ioctl.hwversion = gp_arg.value;

   /* Determine whether the device operates in guest-backed mode. */
   memset(&gp_arg, 0, sizeof(gp_arg));
   gp_arg.param = DRM_VMW_PARAM_HW_CAPS;
   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                             &gp_arg, sizeof(gp_arg));
   if (ret)
      vws->base.have_gb_objects = FALSE;
   else
      vws->base.have_gb_objects =
         !!(gp_arg.value & (uint64_t) SVGA_CAP_GBOBJECTS);

   /* A GB device with a pre-2.5 kernel cannot be driven. */
   if (vws->base.have_gb_objects && !drm_gb_capable)
      goto out_no_3d;

   vws->base.have_vgpu10 = FALSE;
   if (vws->base.have_gb_objects) {
      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_3D_CAPS_SIZE;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                &gp_arg, sizeof(gp_arg));
      if (ret)
         size = SVGA_FIFO_3D_CAPS_SIZE * sizeof(uint32_t);
      else
         size = gp_arg.value;

      /* NOTE(review): have_gb_objects is always TRUE in this branch,
       * so the else below is dead code.
       */
      if (vws->base.have_gb_objects)
         vws->ioctl.num_cap_3d = size / sizeof(uint32_t);
      else
         vws->ioctl.num_cap_3d = SVGA3D_DEVCAP_MAX;


      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_MAX_MOB_MEMORY;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                &gp_arg, sizeof(gp_arg));
      if (ret) {
         /* Just guess a large enough value. */
         vws->ioctl.max_mob_memory = 256*1024*1024;
      } else {
         vws->ioctl.max_mob_memory = gp_arg.value;
      }

      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_MAX_MOB_SIZE;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                &gp_arg, sizeof(gp_arg));

      if (ret || gp_arg.value == 0) {
         vws->ioctl.max_texture_size = VMW_MAX_DEFAULT_TEXTURE_SIZE;
      } else {
         vws->ioctl.max_texture_size = gp_arg.value;
      }

      /* Never early flush surfaces, mobs do accounting. */
      vws->ioctl.max_surface_memory = -1;

      /* VGPU10 (DX) support requires both drm 2.9+ and device support,
       * and may be force-disabled via the SVGA_VGPU10 env var.
       */
      if (vws->ioctl.have_drm_2_9) {

         memset(&gp_arg, 0, sizeof(gp_arg));
         gp_arg.param = DRM_VMW_PARAM_VGPU10;
         ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                   &gp_arg, sizeof(gp_arg));
         if (ret == 0 && gp_arg.value != 0) {
            const char *vgpu10_val;

            debug_printf("Have VGPU10 interface and hardware.\n");
            vws->base.have_vgpu10 = TRUE;
            vgpu10_val = getenv("SVGA_VGPU10");
            if (vgpu10_val && strcmp(vgpu10_val, "0") == 0) {
               debug_printf("Disabling VGPU10 interface.\n");
               vws->base.have_vgpu10 = FALSE;
            } else {
               debug_printf("Enabling VGPU10 interface.\n");
            }
         }
      }
   } else {
      /* Non guest-backed path: fixed devcap count and legacy limits. */
      vws->ioctl.num_cap_3d = SVGA3D_DEVCAP_MAX;

      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_MAX_SURF_MEMORY;
      if (have_drm_2_5)
         ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                   &gp_arg, sizeof(gp_arg));
      if (!have_drm_2_5 || ret) {
         /* Just guess a large enough value, around 800mb. */
         vws->ioctl.max_surface_memory = 0x30000000;
      } else {
         vws->ioctl.max_surface_memory = gp_arg.value;
      }

      vws->ioctl.max_texture_size = VMW_MAX_DEFAULT_TEXTURE_SIZE;

      size = SVGA_FIFO_3D_CAPS_SIZE * sizeof(uint32_t);
   }

   debug_printf("VGPU10 interface is %s.\n",
                vws->base.have_vgpu10 ? "on" : "off");

   cap_buffer = calloc(1, size);
   if (!cap_buffer) {
      debug_printf("Failed alloc fifo 3D caps buffer.\n");
      goto out_no_3d;
   }

   vws->ioctl.cap_3d = calloc(vws->ioctl.num_cap_3d, 
			      sizeof(*vws->ioctl.cap_3d));
   if (!vws->ioctl.cap_3d) {
      debug_printf("Failed alloc fifo 3D caps buffer.\n");
      goto out_no_caparray;
   }

   memset(&cap_arg, 0, sizeof(cap_arg));
   cap_arg.buffer = (uint64_t) (unsigned long) (cap_buffer);
   cap_arg.max_size = size;

   ret = drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_GET_3D_CAP,
			 &cap_arg, sizeof(cap_arg));

   if (ret) {
      debug_printf("Failed to get 3D capabilities"
		   " (%i, %s).\n", ret, strerror(-ret));
      goto out_no_caps;
   }

   ret = vmw_ioctl_parse_caps(vws, cap_buffer);
   if (ret) {
      debug_printf("Failed to parse 3D capabilities"
		   " (%i, %s).\n", ret, strerror(-ret));
      goto out_no_caps;
   }

   if (((version->version_major == 2 && version->version_minor >= 10)
       || version->version_major > 2) && vws->base.have_vgpu10) {

     /* support for these commands didn't make it into vmwgfx kernel
      * modules before 2.10.
      */
      vws->base.have_generate_mipmap_cmd = TRUE;
      vws->base.have_set_predication_cmd = TRUE;
   }

   if (version->version_major == 2 && version->version_minor >= 14) {
      vws->base.have_fence_fd = TRUE;
   }

   free(cap_buffer);
   drmFreeVersion(version);
   vmw_printf("%s OK\n", __FUNCTION__);
   return TRUE;
  out_no_caps:
   free(vws->ioctl.cap_3d);
  out_no_caparray:
   free(cap_buffer);
  out_no_3d:
   drmFreeVersion(version);
  out_no_version:
   vws->ioctl.num_cap_3d = 0;
   debug_printf("%s Failed\n", __FUNCTION__);
   return FALSE;
}
1078
1079
1080
/**
 * vmw_ioctl_cleanup - Tear down ioctl state set up by vmw_ioctl_init.
 *
 * @vws: Winsys screen to clean up.
 *
 * Currently only emits the debug function trace; no resources are
 * released here.
 */
void
vmw_ioctl_cleanup(struct vmw_winsys_screen *vws)
{
   VMW_FUNC;
}
1086