/*
 * Copyright 2010 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "nouveau_screen.h"
#include "nouveau_winsys.h"
#include "nouveau_fence.h"
#include "util/os_time.h"

#ifdef PIPE_OS_UNIX
#include <sched.h>
#endif

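/* Allocate a new fence with a single reference and an empty work list. */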
bool
nouveau_fence_new(struct nouveau_screen *screen, struct nouveau_fence **fence)
{
   *fence = CALLOC_STRUCT(nouveau_fence);
   if (!*fence)
      return false;

   (*fence)->screen = screen;
   (*fence)->ref = 1;
   list_inithead(&(*fence)->work);

   return true;
}

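/* Run and free all work callbacks attached to the fence. */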
static void
nouveau_fence_trigger_work(struct nouveau_fence *fence)
{
   struct nouveau_fence_work *work, *tmp;

   LIST_FOR_EACH_ENTRY_SAFE(work, tmp, &fence->work, list) {
      work->func(work->data);
      list_del(&work->list);
      FREE(work);
   }
}

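/* Emit the fence into the command stream and append it to the screen's list
 * of pending fences.
 */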
void
nouveau_fence_emit(struct nouveau_fence *fence)
{
   struct nouveau_screen *screen = fence->screen;

   assert(fence->state == NOUVEAU_FENCE_STATE_AVAILABLE);

   /* set this now, so that if fence.emit triggers a flush we don't recurse */
   fence->state = NOUVEAU_FENCE_STATE_EMITTING;

   ++fence->ref;

   if (screen->fence.tail)
      screen->fence.tail->next = fence;
   else
      screen->fence.head = fence;

   screen->fence.tail = fence;

   screen->fence.emit(&screen->base, &fence->sequence);

   assert(fence->state == NOUVEAU_FENCE_STATE_EMITTING);
   fence->state = NOUVEAU_FENCE_STATE_EMITTED;
}

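/* Unlink an emitted fence from the screen's pending list and free it.
 * Any work still attached is triggered with a warning.
 */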
void
nouveau_fence_del(struct nouveau_fence *fence)
{
   struct nouveau_fence *it;
   struct nouveau_screen *screen = fence->screen;

   if (fence->state == NOUVEAU_FENCE_STATE_EMITTED ||
       fence->state == NOUVEAU_FENCE_STATE_FLUSHED) {
      if (fence == screen->fence.head) {
         screen->fence.head = fence->next;
         if (!screen->fence.head)
            screen->fence.tail = NULL;
      } else {
         for (it = screen->fence.head; it && it->next != fence; it = it->next);
         it->next = fence->next;
         if (screen->fence.tail == fence)
            screen->fence.tail = it;
      }
   }

   if (!list_is_empty(&fence->work)) {
      debug_printf("WARNING: deleting fence with work still pending !\n");
      nouveau_fence_trigger_work(fence);
   }

   FREE(fence);
}

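/* Wait on the screen's current fence so all previously emitted fences retire,
 * then drop the reference to the current fence.
 */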
void
nouveau_fence_cleanup(struct nouveau_screen *screen)
{
   if (screen->fence.current) {
      struct nouveau_fence *current = NULL;

      /* nouveau_fence_wait will create a new current fence, so wait on the
       * _current_ one, and remove both.
       */
      nouveau_fence_ref(screen->fence.current, &current);
      nouveau_fence_wait(current, NULL);
      nouveau_fence_ref(NULL, &current);
      nouveau_fence_ref(NULL, &screen->fence.current);
   }
}

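/* Read the last acknowledged sequence number from the kernel and retire every
 * pending fence up to it; if 'flushed' is set, mark the remaining emitted
 * fences as flushed.
 */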
void
nouveau_fence_update(struct nouveau_screen *screen, bool flushed)
{
   struct nouveau_fence *fence;
   struct nouveau_fence *next = NULL;
   u32 sequence = screen->fence.update(&screen->base);

   /* If running under drm-shim, let all fences be signalled so things run to
    * completion (avoids a hang at the end of shader-db).
    */
   if (unlikely(screen->disable_fences))
      sequence = screen->fence.sequence;

   if (screen->fence.sequence_ack == sequence)
      return;
   screen->fence.sequence_ack = sequence;

   for (fence = screen->fence.head; fence; fence = next) {
      next = fence->next;
      sequence = fence->sequence;

      fence->state = NOUVEAU_FENCE_STATE_SIGNALLED;

      nouveau_fence_trigger_work(fence);
      nouveau_fence_ref(NULL, &fence);

      if (sequence == screen->fence.sequence_ack)
         break;
   }
   screen->fence.head = next;
   if (!next)
      screen->fence.tail = NULL;

   if (flushed) {
      for (fence = next; fence; fence = fence->next)
         if (fence->state == NOUVEAU_FENCE_STATE_EMITTED)
            fence->state = NOUVEAU_FENCE_STATE_FLUSHED;
   }
}

#define NOUVEAU_FENCE_MAX_SPINS (1 << 31)

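/* Poll whether the fence has signalled, refreshing fence states from the
 * kernel if it has at least been emitted.
 */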
bool
nouveau_fence_signalled(struct nouveau_fence *fence)
{
   struct nouveau_screen *screen = fence->screen;

   if (fence->state == NOUVEAU_FENCE_STATE_SIGNALLED)
      return true;

   if (fence->state >= NOUVEAU_FENCE_STATE_EMITTED)
      nouveau_fence_update(screen, false);

   return fence->state == NOUVEAU_FENCE_STATE_SIGNALLED;
}

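/* Make sure the fence has been emitted and its pushbuf submitted to the
 * kernel, so that waiting on it can make progress.
 */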
static bool
nouveau_fence_kick(struct nouveau_fence *fence)
{
   struct nouveau_screen *screen = fence->screen;

   /* wtf, someone is waiting on a fence in flush_notify handler? */
   assert(fence->state != NOUVEAU_FENCE_STATE_EMITTING);

   if (fence->state < NOUVEAU_FENCE_STATE_EMITTED) {
      PUSH_SPACE(screen->pushbuf, 8);
      /* The space allocation might trigger a flush, which could emit the
       * current fence. So check again.
       */
      if (fence->state < NOUVEAU_FENCE_STATE_EMITTED)
         nouveau_fence_emit(fence);
   }

   if (fence->state < NOUVEAU_FENCE_STATE_FLUSHED)
      if (nouveau_pushbuf_kick(screen->pushbuf, screen->pushbuf->channel))
         return false;

   if (fence == screen->fence.current)
      nouveau_fence_next(screen);

   nouveau_fence_update(screen, false);

   return true;
}

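/* Busy-wait (yielding the CPU every few iterations) until the fence signals
 * or the spin limit is reached.
 */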
bool
nouveau_fence_wait(struct nouveau_fence *fence, struct util_debug_callback *debug)
{
   struct nouveau_screen *screen = fence->screen;
   uint32_t spins = 0;
   int64_t start = 0;

   if (debug && debug->debug_message)
      start = os_time_get_nano();

   if (!nouveau_fence_kick(fence))
      return false;

   do {
      if (fence->state == NOUVEAU_FENCE_STATE_SIGNALLED) {
         if (debug && debug->debug_message)
            util_debug_message(debug, PERF_INFO,
                               "stalled %.3f ms waiting for fence",
                               (os_time_get_nano() - start) / 1000000.f);
         return true;
      }
      if (!spins)
         NOUVEAU_DRV_STAT(screen, any_non_kernel_fence_sync_count, 1);
      spins++;
#ifdef PIPE_OS_UNIX
      if (!(spins % 8)) /* donate a few cycles */
         sched_yield();
#endif

      nouveau_fence_update(screen, false);
   } while (spins < NOUVEAU_FENCE_MAX_SPINS);

   debug_printf("Wait on fence %u (ack = %u, next = %u) timed out !\n",
                fence->sequence,
                screen->fence.sequence_ack, screen->fence.sequence);

   return false;
}

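/* Emit the current fence if anyone else still references it, then replace it
 * with a freshly allocated one.
 */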
void
nouveau_fence_next(struct nouveau_screen *screen)
{
   if (screen->fence.current->state < NOUVEAU_FENCE_STATE_EMITTING) {
      if (screen->fence.current->ref > 1)
         nouveau_fence_emit(screen->fence.current);
      else
         return;
   }

   nouveau_fence_ref(NULL, &screen->fence.current);

   nouveau_fence_new(screen, &screen->fence.current);
}

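/* Fence-work callback that drops a buffer object reference. */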
void
nouveau_fence_unref_bo(void *data)
{
   struct nouveau_bo *bo = data;

   nouveau_bo_ref(NULL, &bo);
}

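/* Schedule a callback to run once the fence signals; if the fence is NULL or
 * has already signalled, run it immediately. Kick the fence when too much
 * work has accumulated.
 */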
bool
nouveau_fence_work(struct nouveau_fence *fence,
                   void (*func)(void *), void *data)
{
   struct nouveau_fence_work *work;

   if (!fence || fence->state == NOUVEAU_FENCE_STATE_SIGNALLED) {
      func(data);
      return true;
   }

   work = CALLOC_STRUCT(nouveau_fence_work);
   if (!work)
      return false;
   work->func = func;
   work->data = data;
   list_add(&work->list, &fence->work);
   p_atomic_inc(&fence->work_count);
   if (fence->work_count > 64)
      nouveau_fence_kick(fence);
   return true;
}