/**********************************************************
 * Copyright 2009-2015 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/
#include "util/u_memory.h"
#include "util/u_atomic.h"
#include "util/list.h"
#include "os/os_thread.h"

#include "pipebuffer/pb_buffer_fenced.h"

#include "vmw_screen.h"
#include "vmw_fence.h"
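
/**
 * struct vmw_fence_ops - The fence operations, built on top of
 * struct pb_fence_ops.
 *
 * Tracks the fences that have not yet signaled, together with the last
 * signaled and last emitted seqnos, so that many signal checks can be
 * answered without calling into the kernel.
 */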
struct vmw_fence_ops
{
   /*
    * Immutable members.
    */
   struct pb_fence_ops base;
   struct vmw_winsys_screen *vws;

   pipe_mutex mutex;

   /*
    * Members protected by mutex:
    */
   struct list_head not_signaled;
   uint32_t last_signaled;
   uint32_t last_emitted;
};
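
/**
 * struct vmw_fence - User-space representation of a kernel fence object.
 *
 * @ops_list: Entry in the vmw_fence_ops not_signaled list.
 * @refcount: Reference count, manipulated atomically.
 * @handle: Handle identifying the kernel fence object.
 * @mask: Mask of flags that this fence object may signal.
 * @signalled: Cached signal state, updated atomically.
 * @seqno: Seqno of this fence object.
 */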
struct vmw_fence
{
   struct list_head ops_list;
   int32_t refcount;
   uint32_t handle;
   uint32_t mask;
   int32_t signalled;
   uint32_t seqno;
};

/**
 * vmw_fence_seq_is_signaled - Check whether a fence seqno has signaled.
 *
 * @seq: The fence seqno to check.
 * @last: The last seqno known to have signaled.
 * @cur: The most recent seqno emitted.
 */
static inline boolean
vmw_fence_seq_is_signaled(uint32_t seq, uint32_t last, uint32_t cur)
{
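   /*
    * Wraparound-safe comparison in 32-bit seqno space: seq counts as
    * signaled when its distance to cur is at least as large as the
    * distance from last to cur, i.e. seq lies at or before last.
    */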
   return (cur - last <= cur - seq);
}


/**
 * vmw_fence_ops - Return the vmw_fence_ops structure backing a
 * struct pb_fence_ops pointer.
 *
 * @ops: Pointer to a struct pb_fence_ops.
 */
static inline struct vmw_fence_ops *
vmw_fence_ops(struct pb_fence_ops *ops)
{
   assert(ops);
   return (struct vmw_fence_ops *)ops;
}


/**
 * vmw_fences_release - Release all fences from the not_signaled
 * list.
 *
 * @ops: Pointer to a struct vmw_fence_ops.
 */
static void
vmw_fences_release(struct vmw_fence_ops *ops)
{
   struct vmw_fence *fence, *n;
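
   /* Delink only: each fence remains valid until its refcount drops to zero. */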
   pipe_mutex_lock(ops->mutex);
   LIST_FOR_EACH_ENTRY_SAFE(fence, n, &ops->not_signaled, ops_list)
      LIST_DELINIT(&fence->ops_list);
   pipe_mutex_unlock(ops->mutex);
}

/**
 * vmw_fences_signal - Traverse the not_signaled list and try to
 * signal unsignaled fences.
 *
 * @fence_ops: Pointer to a struct pb_fence_ops.
 * @signaled: Seqno that has signaled.
 * @emitted: Last seqno emitted by the kernel.
 * @has_emitted: Whether the caller provides the emitted value.
 */
void
vmw_fences_signal(struct pb_fence_ops *fence_ops,
                  uint32_t signaled,
                  uint32_t emitted,
                  boolean has_emitted)
{
   struct vmw_fence_ops *ops = NULL;
   struct vmw_fence *fence, *n;

   if (fence_ops == NULL)
      return;

   ops = vmw_fence_ops(fence_ops);
   pipe_mutex_lock(ops->mutex);
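
   /*
    * If the caller does not provide an emitted value, fall back to the
    * last known one. A value implausibly far ahead of the signaled
    * seqno (more than 2^30) is treated as stale and clamped.
    */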
   if (!has_emitted) {
      emitted = ops->last_emitted;
      if (emitted - signaled > (1 << 30))
         emitted = signaled;
   }

   if (signaled == ops->last_signaled && emitted == ops->last_emitted)
      goto out_unlock;
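
   /*
    * The not_signaled list is kept in emission order, so we can stop at
    * the first fence that has not signaled yet.
    */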
   LIST_FOR_EACH_ENTRY_SAFE(fence, n, &ops->not_signaled, ops_list) {
      if (!vmw_fence_seq_is_signaled(fence->seqno, signaled, emitted))
         break;

      p_atomic_set(&fence->signalled, 1);
      LIST_DELINIT(&fence->ops_list);
   }
   ops->last_signaled = signaled;
   ops->last_emitted = emitted;

out_unlock:
   pipe_mutex_unlock(ops->mutex);
}


/**
 * vmw_fence - Return the vmw_fence object identified by an opaque
 * struct pipe_fence_handle pointer.
 *
 * @fence: The opaque pipe fence handle.
 */
static inline struct vmw_fence *
vmw_fence(struct pipe_fence_handle *fence)
{
   return (struct vmw_fence *) fence;
}


/**
 * vmw_fence_create - Create a user-space fence object.
 *
 * @fence_ops: The fence_ops manager to register with.
 * @handle: Handle identifying the kernel fence object.
 * @seqno: Seqno assigned to the fence object.
 * @mask: Mask of flags that this fence object may signal.
 *
 * Returns NULL on failure.
 */
struct pipe_fence_handle *
vmw_fence_create(struct pb_fence_ops *fence_ops, uint32_t handle,
                 uint32_t seqno, uint32_t mask)
{
   struct vmw_fence *fence = CALLOC_STRUCT(vmw_fence);
   struct vmw_fence_ops *ops = vmw_fence_ops(fence_ops);

   if (!fence)
      return NULL;

   p_atomic_set(&fence->refcount, 1);
   fence->handle = handle;
   fence->mask = mask;
   fence->seqno = seqno;
   p_atomic_set(&fence->signalled, 0);
   pipe_mutex_lock(ops->mutex);
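
   /*
    * Note: passing seqno as the current seqno reduces the wraparound
    * check to seqno == ops->last_signaled. Fences that are not yet
    * signaled are queued in emission order until vmw_fences_signal()
    * marks them.
    */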
   if (vmw_fence_seq_is_signaled(seqno, ops->last_signaled, seqno)) {
      p_atomic_set(&fence->signalled, 1);
      LIST_INITHEAD(&fence->ops_list);
   } else {
      p_atomic_set(&fence->signalled, 0);
      LIST_ADDTAIL(&fence->ops_list, &ops->not_signaled);
   }

   pipe_mutex_unlock(ops->mutex);

   return (struct pipe_fence_handle *) fence;
}


/**
 * vmw_fence_reference - Reference / unreference a vmw fence object.
 *
 * @vws: Pointer to the winsys screen.
 * @ptr: Pointer to reference transfer destination.
 * @fence: Pointer to object to reference. May be NULL.
 */
void
vmw_fence_reference(struct vmw_winsys_screen *vws,
                    struct pipe_fence_handle **ptr,
                    struct pipe_fence_handle *fence)
{
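   /*
    * Drop the old reference first. The kernel fence object and the
    * user-space wrapper are destroyed together with the last reference.
    */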
   if (*ptr) {
      struct vmw_fence *vfence = vmw_fence(*ptr);

      if (p_atomic_dec_zero(&vfence->refcount)) {
         struct vmw_fence_ops *ops = vmw_fence_ops(vws->fence_ops);

         vmw_ioctl_fence_unref(vws, vfence->handle);

         pipe_mutex_lock(ops->mutex);
         LIST_DELINIT(&vfence->ops_list);
         pipe_mutex_unlock(ops->mutex);

         FREE(vfence);
      }
   }

   if (fence) {
      struct vmw_fence *vfence = vmw_fence(fence);

      p_atomic_inc(&vfence->refcount);
   }

   *ptr = fence;
}


/**
 * vmw_fence_signalled - Check whether a fence object has signaled.
 *
 * @vws: Pointer to the winsys screen.
 * @fence: Handle to the fence object.
 * @flag: Fence flags to check. If the fence object can't signal
 * a flag, it is assumed to be already signaled.
 *
 * Returns 0 if the fence object was signaled, nonzero otherwise.
 */
int
vmw_fence_signalled(struct vmw_winsys_screen *vws,
                    struct pipe_fence_handle *fence,
                    unsigned flag)
{
   struct vmw_fence *vfence;
   int32_t vflags = SVGA_FENCE_FLAG_EXEC;
   int ret;
   uint32_t old;

   if (!fence)
      return 0;

   vfence = vmw_fence(fence);
   old = p_atomic_read(&vfence->signalled);
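
   /*
    * Keep only the requested flag bits not covered by the fence's mask;
    * if those are all set in the cached signalled state, report the
    * fence as signaled without a kernel call.
    */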
   vflags &= ~vfence->mask;

   if ((old & vflags) == vflags)
      return 0;

   /*
    * Currently we update signaled fences on each execbuf call.
    * That should really be sufficient, and we can avoid
    * a lot of kernel calls this way.
    */
#if 1
   ret = vmw_ioctl_fence_signalled(vws, vfence->handle, vflags);

   if (ret == 0)
      p_atomic_set(&vfence->signalled, 1);
   return ret;
#else
   (void) ret;
   return -1;
#endif
}

/**
 * vmw_fence_finish - Wait for a fence object to signal.
 *
 * @vws: Pointer to the winsys screen.
 * @fence: Handle to the fence object.
 * @flag: Fence flags to wait for. If the fence object can't signal
 * a flag, it is assumed to be already signaled.
 *
 * Returns 0 if the wait succeeded. Nonzero otherwise.
 */
int
vmw_fence_finish(struct vmw_winsys_screen *vws,
                 struct pipe_fence_handle *fence,
                 unsigned flag)
{
   struct vmw_fence *vfence;
   int32_t vflags = SVGA_FENCE_FLAG_EXEC;
   int ret;
   uint32_t old;

   if (!fence)
      return 0;

   vfence = vmw_fence(fence);
   old = p_atomic_read(&vfence->signalled);
   vflags &= ~vfence->mask;

   if ((old & vflags) == vflags)
      return 0;

   ret = vmw_ioctl_fence_finish(vws, vfence->handle, vflags);
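
   /*
    * On success, merge the waited-for flags into the cached signalled
    * state with a compare-and-swap loop, so that concurrent updates
    * are not lost.
    */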
   if (ret == 0) {
      int32_t prev = old;

      do {
         old = prev;
         prev = p_atomic_cmpxchg(&vfence->signalled, old, old | vflags);
      } while (prev != old);
   }

   return ret;
}


/**
 * vmw_fence_ops_fence_reference - Wrapper for the pb_fence_ops API.
 *
 * Wraps vmw_fence_reference().
 */
static void
vmw_fence_ops_fence_reference(struct pb_fence_ops *ops,
                              struct pipe_fence_handle **ptr,
                              struct pipe_fence_handle *fence)
{
   struct vmw_winsys_screen *vws = vmw_fence_ops(ops)->vws;

   vmw_fence_reference(vws, ptr, fence);
}

/**
 * vmw_fence_ops_fence_signalled - Wrapper for the pb_fence_ops API.
 *
 * Wraps vmw_fence_signalled().
 */
static int
vmw_fence_ops_fence_signalled(struct pb_fence_ops *ops,
                              struct pipe_fence_handle *fence,
                              unsigned flag)
{
   struct vmw_winsys_screen *vws = vmw_fence_ops(ops)->vws;

   return vmw_fence_signalled(vws, fence, flag);
}


/**
 * vmw_fence_ops_fence_finish - Wrapper for the pb_fence_ops API.
 *
 * Wraps vmw_fence_finish().
 */
static int
vmw_fence_ops_fence_finish(struct pb_fence_ops *ops,
                           struct pipe_fence_handle *fence,
                           unsigned flag)
{
   struct vmw_winsys_screen *vws = vmw_fence_ops(ops)->vws;

   return vmw_fence_finish(vws, fence, flag);
}


/**
 * vmw_fence_ops_destroy - Destroy a pb_fence_ops function table.
 *
 * @ops: The function table to destroy.
 *
 * Part of the pb_fence_ops API.
 */
static void
vmw_fence_ops_destroy(struct pb_fence_ops *ops)
{
   vmw_fences_release(vmw_fence_ops(ops));
   FREE(ops);
}


/**
 * vmw_fence_ops_create - Create a pb_fence_ops function table.
 *
 * @vws: Pointer to a struct vmw_winsys_screen.
 *
 * Returns a pointer to a pb_fence_ops function table to interface
 * with pipe_buffer. This function is typically called on driver setup.
 *
 * Returns NULL on failure.
 */
struct pb_fence_ops *
vmw_fence_ops_create(struct vmw_winsys_screen *vws)
{
   struct vmw_fence_ops *ops;

   ops = CALLOC_STRUCT(vmw_fence_ops);
   if (!ops)
      return NULL;
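
   /*
    * Initialize the bookkeeping and hook up the pb_fence_ops vtable;
    * the pipebuffer fencing code calls back through these entry points.
    */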
   pipe_mutex_init(ops->mutex);
   LIST_INITHEAD(&ops->not_signaled);
   ops->base.destroy = &vmw_fence_ops_destroy;
   ops->base.fence_reference = &vmw_fence_ops_fence_reference;
   ops->base.fence_signalled = &vmw_fence_ops_fence_signalled;
   ops->base.fence_finish = &vmw_fence_ops_fence_finish;

   ops->vws = vws;

   return &ops->base;
}