/**********************************************************
 * Copyright 2009-2015 VMware, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/
#include <libsync.h>

#include "util/u_memory.h"
#include "util/u_atomic.h"
#include "util/list.h"
#include "os/os_thread.h"

#include "pipebuffer/pb_buffer_fenced.h"

#include "vmw_screen.h"
#include "vmw_fence.h"

struct vmw_fence_ops
{
   /*
    * Immutable members.
    */
   struct pb_fence_ops base;
   struct vmw_winsys_screen *vws;

   mtx_t mutex;

   /*
    * Protected by mutex.
    */
   struct list_head not_signaled;
   uint32_t last_signaled;
   uint32_t last_emitted;
};
54
55 struct vmw_fence
56 {
57 struct list_head ops_list;
58 int32_t refcount;
59 uint32_t handle;
60 uint32_t mask;
61 int32_t signalled;
62 uint32_t seqno;
63 int32_t fence_fd;
64 boolean imported; /* TRUE if imported from another process */
65 };
66
/**
 * vmw_fence_seq_is_signaled - Check whether a fence seqno is
 * signaled.
 *
 * @seq: The fence sequence number to check.
 * @last: The last seqno that has signaled.
 * @cur: The last seqno emitted.
 *
 */
static inline boolean
vmw_fence_seq_is_signaled(uint32_t seq, uint32_t last, uint32_t cur)
{
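   /*
    * Wrap-safe comparison: @seq has signaled iff it is no newer than
    * @last, i.e. iff @seq lies at least as far behind @cur as @last
    * does.
    */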
   return (cur - last <= cur - seq);
}


/**
 * vmw_fence_ops - Return the vmw_fence_ops structure backing a
 * struct pb_fence_ops pointer.
 *
 * @ops: Pointer to a struct pb_fence_ops.
 *
 */
static inline struct vmw_fence_ops *
vmw_fence_ops(struct pb_fence_ops *ops)
{
   assert(ops);
   return (struct vmw_fence_ops *)ops;
}


/**
 * vmw_fences_release - Release all fences from the not_signaled
 * list.
 *
 * @ops: Pointer to a struct vmw_fence_ops.
 *
 */
static void
vmw_fences_release(struct vmw_fence_ops *ops)
{
   struct vmw_fence *fence, *n;

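   /*
    * Only delink the fences here; the objects themselves stay alive
    * through their remaining reference holders.
    */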
   mtx_lock(&ops->mutex);
   LIST_FOR_EACH_ENTRY_SAFE(fence, n, &ops->not_signaled, ops_list)
      list_delinit(&fence->ops_list);
   mtx_unlock(&ops->mutex);
}

/**
 * vmw_fences_signal - Traverse the not_signaled list and try to
 * signal unsignaled fences.
 *
 * @fence_ops: Pointer to a struct pb_fence_ops.
 * @signaled: Seqno that has signaled.
 * @emitted: Last seqno emitted by the kernel.
 * @has_emitted: Whether we provide the emitted value.
 *
 */
void
vmw_fences_signal(struct pb_fence_ops *fence_ops,
                  uint32_t signaled,
                  uint32_t emitted,
                  boolean has_emitted)
{
   struct vmw_fence_ops *ops = NULL;
   struct vmw_fence *fence, *n;

   if (fence_ops == NULL)
      return;

   ops = vmw_fence_ops(fence_ops);
   mtx_lock(&ops->mutex);

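   /*
    * Without an emitted value from the caller, fall back to the
    * cached one, and clamp it to @signaled if the distance between
    * the two is implausibly large (seqno wraparound).
    */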
   if (!has_emitted) {
      emitted = ops->last_emitted;
      if (emitted - signaled > (1 << 30))
         emitted = signaled;
   }

   if (signaled == ops->last_signaled && emitted == ops->last_emitted)
      goto out_unlock;

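   /*
    * The not_signaled list is kept in emission order, so we can stop
    * at the first fence that hasn't signaled yet.
    */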
   LIST_FOR_EACH_ENTRY_SAFE(fence, n, &ops->not_signaled, ops_list) {
      if (!vmw_fence_seq_is_signaled(fence->seqno, signaled, emitted))
         break;

      p_atomic_set(&fence->signalled, 1);
      list_delinit(&fence->ops_list);
   }
   ops->last_signaled = signaled;
   ops->last_emitted = emitted;

out_unlock:
   mtx_unlock(&ops->mutex);
}


/**
 * vmw_fence - Return the vmw_fence object identified by a
 * struct pipe_fence_handle *.
 *
 * @fence: The opaque pipe fence handle.
 */
static inline struct vmw_fence *
vmw_fence(struct pipe_fence_handle *fence)
{
   return (struct vmw_fence *) fence;
}


/**
 * vmw_fence_create - Create a user-space fence object.
 *
 * @fence_ops: The fence_ops manager to register with.
 * @handle: Handle identifying the kernel fence object.
 * @seqno: Sequence number of the fence.
 * @mask: Mask of flags that this fence object may signal.
 * @fd: File descriptor to associate with the fence.
 *
 * Returns NULL on failure.
 */
struct pipe_fence_handle *
vmw_fence_create(struct pb_fence_ops *fence_ops, uint32_t handle,
                 uint32_t seqno, uint32_t mask, int32_t fd)
{
   struct vmw_fence *fence = CALLOC_STRUCT(vmw_fence);
   struct vmw_fence_ops *ops = NULL;

   if (!fence)
      return NULL;

   p_atomic_set(&fence->refcount, 1);
   fence->handle = handle;
   fence->mask = mask;
   fence->seqno = seqno;
   fence->fence_fd = fd;
   p_atomic_set(&fence->signalled, 0);

   /*
    * If the fence was not created by our device, then we won't
    * manage it with our ops.
    */
   if (!fence_ops) {
      fence->imported = true;
      return (struct pipe_fence_handle *) fence;
   }

   ops = vmw_fence_ops(fence_ops);

   mtx_lock(&ops->mutex);

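   /*
    * A fence whose seqno has already signaled never goes on the
    * not_signaled list.
    */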
   if (vmw_fence_seq_is_signaled(seqno, ops->last_signaled, seqno)) {
      p_atomic_set(&fence->signalled, 1);
      list_inithead(&fence->ops_list);
   } else {
      p_atomic_set(&fence->signalled, 0);
      list_addtail(&fence->ops_list, &ops->not_signaled);
   }

   mtx_unlock(&ops->mutex);

   return (struct pipe_fence_handle *) fence;
}


/**
 * vmw_fence_destroy - Free a vmw fence object.
 *
 * Also closes the file handle associated with the object, if any.
 */
static
void vmw_fence_destroy(struct vmw_fence *vfence)
{
   if (vfence->fence_fd != -1)
      close(vfence->fence_fd);

   FREE(vfence);
}


/**
 * vmw_fence_reference - Reference / unreference a vmw fence object.
 *
 * @vws: Pointer to the winsys screen.
 * @ptr: Pointer to reference transfer destination.
 * @fence: Pointer to object to reference. May be NULL.
 */
void
vmw_fence_reference(struct vmw_winsys_screen *vws,
                    struct pipe_fence_handle **ptr,
                    struct pipe_fence_handle *fence)
{
   if (*ptr) {
      struct vmw_fence *vfence = vmw_fence(*ptr);

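      /*
       * On the last unreference, drop the kernel fence object and
       * take the fence off the not_signaled list before freeing.
       * Imported fences own no kernel handle of ours to drop.
       */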
      if (p_atomic_dec_zero(&vfence->refcount)) {
         struct vmw_fence_ops *ops = vmw_fence_ops(vws->fence_ops);

         if (!vfence->imported) {
            vmw_ioctl_fence_unref(vws, vfence->handle);

            mtx_lock(&ops->mutex);
            list_delinit(&vfence->ops_list);
            mtx_unlock(&ops->mutex);
         }

         vmw_fence_destroy(vfence);
      }
   }

   if (fence) {
      struct vmw_fence *vfence = vmw_fence(fence);

      p_atomic_inc(&vfence->refcount);
   }

   *ptr = fence;
}


/**
 * vmw_fence_signalled - Check whether a fence object is signalled.
 *
 * @vws: Pointer to the winsys screen.
 * @fence: Handle to the fence object.
 * @flag: Fence flags to check. If the fence object can't signal
 * a flag, it is assumed to be already signaled.
 *
 * Returns 0 if the fence object was signaled, nonzero otherwise.
 */
int
vmw_fence_signalled(struct vmw_winsys_screen *vws,
                    struct pipe_fence_handle *fence,
                    unsigned flag)
{
   struct vmw_fence *vfence;
   int32_t vflags = SVGA_FENCE_FLAG_EXEC;
   int ret;
   uint32_t old;

   if (!fence)
      return 0;

   vfence = vmw_fence(fence);
   old = p_atomic_read(&vfence->signalled);

   vflags &= ~vfence->mask;

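   /*
    * Fast path: everything we're asked about is already recorded as
    * signaled, so no kernel round trip is needed.
    */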
   if ((old & vflags) == vflags)
      return 0;

   /*
    * Currently we update signaled fences on each execbuf call.
    * That should really be sufficient, and we can avoid
    * a lot of kernel calls this way.
    */
#if 1
   ret = vmw_ioctl_fence_signalled(vws, vfence->handle, vflags);

   if (ret == 0)
      p_atomic_set(&vfence->signalled, 1);
   return ret;
#else
   (void) ret;
   return -1;
#endif
}

/**
 * vmw_fence_finish - Wait for a fence object to signal.
 *
 * @vws: Pointer to the winsys screen.
 * @fence: Handle to the fence object.
 * @timeout: How long to wait before timing out.
 * @flag: Fence flags to wait for. If the fence object can't signal
 * a flag, it is assumed to be already signaled.
 *
 * Returns 0 if the wait succeeded. Nonzero otherwise.
 */
int
vmw_fence_finish(struct vmw_winsys_screen *vws,
                 struct pipe_fence_handle *fence,
                 uint64_t timeout,
                 unsigned flag)
{
   struct vmw_fence *vfence;
   int32_t vflags = SVGA_FENCE_FLAG_EXEC;
   int ret;
   uint32_t old;

   if (!fence)
      return 0;

   vfence = vmw_fence(fence);

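   /*
    * Imported fences are waited on through their sync file.
    * sync_wait() takes milliseconds while @timeout is in
    * nanoseconds, hence the conversion.
    */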
   if (vfence->imported) {
      ret = sync_wait(vfence->fence_fd, timeout / 1000000);

      if (!ret)
         p_atomic_set(&vfence->signalled, 1);

      return !!ret;
   }

   old = p_atomic_read(&vfence->signalled);
   vflags &= ~vfence->mask;

   if ((old & vflags) == vflags)
      return 0;

   ret = vmw_ioctl_fence_finish(vws, vfence->handle, vflags);

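   /*
    * OR the newly signaled flags into vfence->signalled with a
    * cmpxchg loop, retrying if another thread updated it
    * concurrently.
    */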
   if (ret == 0) {
      int32_t prev = old;

      do {
         old = prev;
         prev = p_atomic_cmpxchg(&vfence->signalled, old, old | vflags);
      } while (prev != old);
   }

   return ret;
}

/**
 * vmw_fence_get_fd - Return the file descriptor associated with
 * the fence, or -1 if there is none.
 */
int
vmw_fence_get_fd(struct pipe_fence_handle *fence)
{
   struct vmw_fence *vfence;

   if (!fence)
      return -1;

   vfence = vmw_fence(fence);
   return vfence->fence_fd;
}


/**
 * vmw_fence_ops_fence_reference - Wrapper for the pb_fence_ops API.
 *
 * Wrapper around vmw_fence_reference.
 */
static void
vmw_fence_ops_fence_reference(struct pb_fence_ops *ops,
                              struct pipe_fence_handle **ptr,
                              struct pipe_fence_handle *fence)
{
   struct vmw_winsys_screen *vws = vmw_fence_ops(ops)->vws;

   vmw_fence_reference(vws, ptr, fence);
}

/**
 * vmw_fence_ops_fence_signalled - Wrapper for the pb_fence_ops API.
 *
 * Wrapper around vmw_fence_signalled.
 */
static int
vmw_fence_ops_fence_signalled(struct pb_fence_ops *ops,
                              struct pipe_fence_handle *fence,
                              unsigned flag)
{
   struct vmw_winsys_screen *vws = vmw_fence_ops(ops)->vws;

   return vmw_fence_signalled(vws, fence, flag);
}


/**
 * vmw_fence_ops_fence_finish - Wrapper for the pb_fence_ops API.
 *
 * Wrapper around vmw_fence_finish, waiting indefinitely
 * (PIPE_TIMEOUT_INFINITE) for the fence to signal.
 */
static int
vmw_fence_ops_fence_finish(struct pb_fence_ops *ops,
                           struct pipe_fence_handle *fence,
                           unsigned flag)
{
   struct vmw_winsys_screen *vws = vmw_fence_ops(ops)->vws;

   return vmw_fence_finish(vws, fence, PIPE_TIMEOUT_INFINITE, flag);
}


/**
 * vmw_fence_ops_destroy - Destroy a pb_fence_ops function table.
 *
 * @ops: The function table to destroy.
 *
 * Part of the pb_fence_ops API.
 */
static void
vmw_fence_ops_destroy(struct pb_fence_ops *ops)
{
   vmw_fences_release(vmw_fence_ops(ops));
   FREE(ops);
}


/**
 * vmw_fence_ops_create - Create a pb_fence_ops function table.
 *
 * @vws: Pointer to a struct vmw_winsys_screen.
 *
 * Returns a pointer to a pb_fence_ops function table to interface
 * with pipebuffer. This function is typically called on driver setup.
 *
 * Returns NULL on failure.
 */
struct pb_fence_ops *
vmw_fence_ops_create(struct vmw_winsys_screen *vws)
{
   struct vmw_fence_ops *ops;

   ops = CALLOC_STRUCT(vmw_fence_ops);
   if (!ops)
      return NULL;

   (void) mtx_init(&ops->mutex, mtx_plain);
   list_inithead(&ops->not_signaled);
   ops->base.destroy = &vmw_fence_ops_destroy;
   ops->base.fence_reference = &vmw_fence_ops_fence_reference;
   ops->base.fence_signalled = &vmw_fence_ops_fence_signalled;
   ops->base.fence_finish = &vmw_fence_ops_fence_finish;

   ops->vws = vws;

   return &ops->base;
}