/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file glthread.c
 *
 * Support functions for the glthread feature of Mesa.
 *
 * In multicore systems, many applications end up CPU-bound with about half
 * their time spent inside their rendering thread and half inside Mesa.  To
 * alleviate this, we put a shim layer in Mesa at the GL dispatch level that
 * quickly logs the GL commands to a buffer to be processed by a worker
 * thread.
 */
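
/* For illustration only: each GL entry point in the MarshalExec dispatch
 * table created below points at a generated marshalling function that
 * appends a command to the current batch.  A rough sketch of such a
 * function (hypothetical command layout, simplified allocation; the real
 * versions are code-generated) could look like:
 *
 *    struct marshal_cmd_Enable {
 *       struct marshal_cmd_base cmd_base;
 *       GLenum cap;
 *    };
 *
 *    void GLAPIENTRY
 *    _mesa_marshal_Enable(GLenum cap)
 *    {
 *       GET_CURRENT_CONTEXT(ctx);
 *       struct marshal_cmd_Enable *cmd =
 *          _mesa_glthread_allocate_command(ctx, DISPATCH_CMD_Enable,
 *                                          sizeof(*cmd));
 *       cmd->cap = cap;
 *    }
 *
 * glthread_unmarshal_batch() below replays the recorded commands on the
 * worker thread through _mesa_unmarshal_dispatch.
 */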

#include "main/mtypes.h"
#include "main/glthread.h"
#include "main/glthread_marshal.h"
#include "main/hash.h"
#include "util/u_atomic.h"
#include "util/u_thread.h"
#include "util/u_cpu_detect.h"
#include "util/thread_sched.h"

#include "state_tracker/st_context.h"

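/**
 * Decide whether the worker thread should take the global shared-state
 * mutexes once per batch.  Per-batch locking is only enabled after a single
 * context has been executing alone for NoLockDuration nanoseconds; a context
 * switch resets that stopwatch and adapts the duration heuristically.
 */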
static void
glthread_update_global_locking(struct gl_context *ctx)
{
   struct gl_shared_state *shared = ctx->Shared;

   /* Determine if we should lock the global mutexes. */
   simple_mtx_lock(&shared->Mutex);
   int64_t current_time = os_time_get_nano();

   /* We can only lock the mutexes after NoLockDuration nanoseconds have
    * passed since multiple contexts were active.
    */
   bool lock_mutexes = shared->GLThread.LastContextSwitchTime +
                       shared->GLThread.NoLockDuration < current_time;

   /* Check if multiple contexts are active (the last executing context is
    * different).
    */
   if (ctx != shared->GLThread.LastExecutingCtx) {
      if (lock_mutexes) {
         /* If we get here, we've been locking the global mutexes for a while
          * and now we are switching contexts. */
         if (shared->GLThread.LastContextSwitchTime +
             120 * ONE_SECOND_IN_NS < current_time) {
            /* If it's been more than 2 minutes of only one active context,
             * indicating that there was no other active context for a long
             * time, reset the no-lock time to its initial state of only 1
             * second. This is most likely an infrequent situation of
             * multi-context loading of game content and shaders.
             * (this is a heuristic)
             */
            shared->GLThread.NoLockDuration = ONE_SECOND_IN_NS;
         } else if (shared->GLThread.NoLockDuration < 32 * ONE_SECOND_IN_NS) {
            /* Double the no-lock duration if we are transitioning from only
             * one active context to multiple active contexts after a short
             * time, up to a maximum of 32 seconds, indicating that multiple
             * contexts are frequently executing. (this is a heuristic)
             */
            shared->GLThread.NoLockDuration *= 2;
         }

         lock_mutexes = false;
      }

      /* There are multiple active contexts. Update the last executing context
       * and the last context switch time. We only start locking global mutexes
       * after LastContextSwitchTime + NoLockDuration passes, so this
       * effectively resets the non-locking stopwatch to 0, so that multiple
       * contexts can execute simultaneously as long as they are not idle.
       */
      shared->GLThread.LastExecutingCtx = ctx;
      shared->GLThread.LastContextSwitchTime = current_time;
   }
   simple_mtx_unlock(&shared->Mutex);

   ctx->GLThread.LockGlobalMutexes = lock_mutexes;
}

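/**
 * Worker-thread job: execute one batch of marshalled GL commands by
 * replaying it through the unmarshal dispatch table, then signal the call
 * fences that wait for this batch and update the statistics.
 */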
static void
glthread_unmarshal_batch(void *job, void *gdata, int thread_index)
{
   struct glthread_batch *batch = (struct glthread_batch*)job;
   struct gl_context *ctx = batch->ctx;
   unsigned pos = 0;
   unsigned used = batch->used;
   uint64_t *buffer = batch->buffer;
   struct gl_shared_state *shared = ctx->Shared;

   /* Determine once every 64 batches whether shared mutexes should be locked.
    * We have to do this less frequently because os_time_get_nano() is very
    * expensive if the clock source is not TSC. See:
    *    https://gitlab.freedesktop.org/mesa/mesa/-/issues/8910
    */
   if (ctx->GLThread.GlobalLockUpdateBatchCounter++ % 64 == 0)
      glthread_update_global_locking(ctx);

   /* Execute the GL calls. */
   _glapi_set_dispatch(ctx->Dispatch.Current);

   /* Here we lock the mutexes once globally if possible. If not, we just
    * fall back to locking them in the individual API calls.
    */
   bool lock_mutexes = ctx->GLThread.LockGlobalMutexes;
   if (lock_mutexes) {
      _mesa_HashLockMutex(&shared->BufferObjects);
      ctx->BufferObjectsLocked = true;
      simple_mtx_lock(&shared->TexMutex);
      ctx->TexturesLocked = true;
   }

   while (pos < used) {
      const struct marshal_cmd_base *cmd =
         (const struct marshal_cmd_base *)&buffer[pos];

      pos += _mesa_unmarshal_dispatch[cmd->cmd_id](ctx, cmd);
   }

   if (lock_mutexes) {
      ctx->TexturesLocked = false;
      simple_mtx_unlock(&shared->TexMutex);
      ctx->BufferObjectsLocked = false;
      _mesa_HashUnlockMutex(&shared->BufferObjects);
   }

   assert(pos == used);
   batch->used = 0;

   unsigned batch_index = batch - ctx->GLThread.batches;
   _mesa_glthread_signal_call(&ctx->GLThread.LastProgramChangeBatch, batch_index);
   _mesa_glthread_signal_call(&ctx->GLThread.LastDListChangeBatchIndex, batch_index);

   p_atomic_inc(&ctx->GLThread.stats.num_batches);
}

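/**
 * Periodically (and at initialization) re-apply the thread scheduling
 * policy to the glthread worker, based on the CPU the application thread is
 * currently running on, and propagate it to the driver threads.
 */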
static void
glthread_apply_thread_sched_policy(struct gl_context *ctx, bool initialization)
{
   struct glthread_state *glthread = &ctx->GLThread;

   if (!glthread->thread_sched_enabled)
      return;

   /* Apply our thread scheduling policy for better multithreading
    * performance.
    */
   if (initialization || ++glthread->pin_thread_counter % 128 == 0) {
      int cpu = util_get_current_cpu();

      if (cpu >= 0 &&
          util_thread_sched_apply_policy(glthread->queue.threads[0],
                                         UTIL_THREAD_GLTHREAD, cpu,
                                         &glthread->thread_sched_state)) {
         /* If it's successful, apply the policy to the driver threads too. */
         ctx->pipe->set_context_param(ctx->pipe,
                                      PIPE_CONTEXT_PARAM_UPDATE_THREAD_SCHEDULING,
                                      cpu);
      }
   }
}

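/**
 * One-time initialization job that runs on the worker thread: set up a
 * background context for the driver and make the GL context current there.
 */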
static void
glthread_thread_initialization(void *job, void *gdata, int thread_index)
{
   struct gl_context *ctx = (struct gl_context*)job;

   st_set_background_context(ctx, &ctx->GLThread.stats);
   _glapi_set_context(ctx);
}

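/**
 * Fill the marshalling dispatch table. The generated initialization code is
 * split across several functions, so call all of them.
 */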
static void
_mesa_glthread_init_dispatch(struct gl_context *ctx,
                             struct _glapi_table *table)
{
   _mesa_glthread_init_dispatch0(ctx, table);
   _mesa_glthread_init_dispatch1(ctx, table);
   _mesa_glthread_init_dispatch2(ctx, table);
   _mesa_glthread_init_dispatch3(ctx, table);
   _mesa_glthread_init_dispatch4(ctx, table);
   _mesa_glthread_init_dispatch5(ctx, table);
   _mesa_glthread_init_dispatch6(ctx, table);
   _mesa_glthread_init_dispatch7(ctx, table);
}

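/**
 * Create the glthread worker queue, the marshalling dispatch table
 * (MarshalExec) and the ring of batches, then enable glthread and run the
 * one-time initialization on the worker thread.
 */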
void
_mesa_glthread_init(struct gl_context *ctx)
{
   struct pipe_screen *screen = ctx->screen;
   struct glthread_state *glthread = &ctx->GLThread;
   assert(!glthread->enabled);

   if (!screen->get_param(screen, PIPE_CAP_MAP_UNSYNCHRONIZED_THREAD_SAFE) ||
       !screen->get_param(screen, PIPE_CAP_ALLOW_MAPPED_BUFFERS_DURING_EXECUTION))
      return;

   if (!util_queue_init(&glthread->queue, "gl", MARSHAL_MAX_BATCHES - 2,
                        1, 0, NULL)) {
      return;
   }

   _mesa_InitHashTable(&glthread->VAOs);
   _mesa_glthread_reset_vao(&glthread->DefaultVAO);
   glthread->CurrentVAO = &glthread->DefaultVAO;

   ctx->MarshalExec = _mesa_alloc_dispatch_table(true);
   if (!ctx->MarshalExec) {
      _mesa_DeinitHashTable(&glthread->VAOs, NULL, NULL);
      util_queue_destroy(&glthread->queue);
      return;
   }

   _mesa_glthread_init_dispatch(ctx, ctx->MarshalExec);

   for (unsigned i = 0; i < MARSHAL_MAX_BATCHES; i++) {
      glthread->batches[i].ctx = ctx;
      util_queue_fence_init(&glthread->batches[i].fence);
   }
   glthread->next_batch = &glthread->batches[glthread->next];
   glthread->used = 0;
   glthread->stats.queue = &glthread->queue;

   _mesa_glthread_init_call_fence(&glthread->LastProgramChangeBatch);
   _mesa_glthread_init_call_fence(&glthread->LastDListChangeBatchIndex);

   _mesa_glthread_enable(ctx);

   /* Execute the thread initialization function in the thread. */
   struct util_queue_fence fence;
   util_queue_fence_init(&fence);
   util_queue_add_job(&glthread->queue, ctx, &fence,
                      glthread_thread_initialization, NULL, 0);
   util_queue_fence_wait(&fence);
   util_queue_fence_destroy(&fence);

   glthread->thread_sched_enabled = ctx->pipe->set_context_param &&
                                    util_thread_scheduler_enabled();
   util_thread_scheduler_init_state(&glthread->thread_sched_state);
   glthread_apply_thread_sched_policy(ctx, true);
}

static void
free_vao(void *data, UNUSED void *userData)
{
   free(data);
}

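/**
 * Tear down glthread: disable it, destroy the worker queue and the batch
 * fences, and free the tracked VAOs and the upload buffer.
 */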
void
_mesa_glthread_destroy(struct gl_context *ctx)
{
   struct glthread_state *glthread = &ctx->GLThread;

   _mesa_glthread_disable(ctx);

   if (util_queue_is_initialized(&glthread->queue)) {
      util_queue_destroy(&glthread->queue);

      for (unsigned i = 0; i < MARSHAL_MAX_BATCHES; i++)
         util_queue_fence_destroy(&glthread->batches[i].fence);

      _mesa_DeinitHashTable(&glthread->VAOs, free_vao, NULL);
      _mesa_glthread_release_upload_buffer(ctx);
   }
}

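/**
 * Start marshalling: switch the context to the MarshalExec dispatch table so
 * that subsequent GL calls are recorded into batches instead of executed
 * directly.
 */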
void _mesa_glthread_enable(struct gl_context *ctx)
{
   if (ctx->GLThread.enabled ||
       ctx->Dispatch.Current == ctx->Dispatch.ContextLost ||
       ctx->GLThread.DebugOutputSynchronous)
      return;

   ctx->GLThread.enabled = true;
   ctx->GLApi = ctx->MarshalExec;

   /* glthread takes over all thread scheduling. */
   ctx->st->pin_thread_counter = ST_THREAD_SCHEDULER_DISABLED;

   /* Update the dispatch only if the dispatch is current. */
   if (_glapi_get_dispatch() == ctx->Dispatch.Current) {
      _glapi_set_dispatch(ctx->GLApi);
   }
}

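/**
 * Stop marshalling: wait for all pending batches, restore the direct
 * dispatch table, and re-enable st/mesa thread scheduling.
 */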
void _mesa_glthread_disable(struct gl_context *ctx)
{
   if (!ctx->GLThread.enabled)
      return;

   _mesa_glthread_finish(ctx);

   ctx->GLThread.enabled = false;
   ctx->GLApi = ctx->Dispatch.Current;

   /* Re-enable thread scheduling in st/mesa when glthread is disabled. */
   if (ctx->pipe->set_context_param && util_thread_scheduler_enabled())
      ctx->st->pin_thread_counter = 0;

   /* Update the dispatch only if the dispatch is current. */
   if (_glapi_get_dispatch() == ctx->MarshalExec) {
      _glapi_set_dispatch(ctx->GLApi);
   }

   /* Unbind VBOs in all VAOs that glthread bound for non-VBO vertex uploads
    * to restore original states.
    */
   if (ctx->API != API_OPENGL_CORE)
      _mesa_glthread_unbind_uploaded_vbos(ctx);
}

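/**
 * Seal the batch that is currently being recorded: write the end-of-batch
 * marker, publish the command count to the batch, and reset the recording
 * state.
 */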
static void
glthread_finalize_batch(struct glthread_state *glthread,
                        unsigned *num_items_counter)
{
   struct glthread_batch *next = glthread->next_batch;

   /* Mark the end of the batch, but don't increment "used". */
   struct marshal_cmd_base *last =
      (struct marshal_cmd_base *)&next->buffer[glthread->used];
   last->cmd_id = NUM_DISPATCH_CMD;

   p_atomic_add(num_items_counter, glthread->used);
   next->used = glthread->used;
   glthread->used = 0;

   glthread->LastCallList = NULL;
   glthread->LastBindBuffer1 = NULL;
   glthread->LastBindBuffer2 = NULL;
}

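/**
 * Submit the batch currently being recorded (if any) to the worker thread
 * and switch recording to the next batch in the ring.
 */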
void
_mesa_glthread_flush_batch(struct gl_context *ctx)
{
   struct glthread_state *glthread = &ctx->GLThread;
   if (!glthread->enabled)
      return;

   if (ctx->Dispatch.Current == ctx->Dispatch.ContextLost) {
      _mesa_glthread_disable(ctx);
      return;
   }

   if (!glthread->used)
      return; /* the batch is empty */

   glthread_apply_thread_sched_policy(ctx, false);
   glthread_finalize_batch(glthread, &glthread->stats.num_offloaded_items);

   struct glthread_batch *next = glthread->next_batch;

   util_queue_add_job(&glthread->queue, next, &next->fence,
                      glthread_unmarshal_batch, NULL, 0);
   glthread->last = glthread->next;
   glthread->next = (glthread->next + 1) % MARSHAL_MAX_BATCHES;
   glthread->next_batch = &glthread->batches[glthread->next];
}

/**
 * Waits until all pending batches have been unmarshaled.
 *
 * This can be used by the main thread to synchronize access to the context,
 * since the worker thread will be idle after this.
 */
void
_mesa_glthread_finish(struct gl_context *ctx)
{
   struct glthread_state *glthread = &ctx->GLThread;
   if (!glthread->enabled)
      return;

   /* If this is called from the worker thread, then we've hit a path that
    * might be called from either the main thread or the worker (such as some
    * dri interface entrypoints), in which case we don't need to actually
    * synchronize against ourself.
    */
   if (u_thread_is_self(glthread->queue.threads[0]))
      return;

   struct glthread_batch *last = &glthread->batches[glthread->last];
   struct glthread_batch *next = glthread->next_batch;
   bool synced = false;

   if (!util_queue_fence_is_signalled(&last->fence)) {
      util_queue_fence_wait(&last->fence);
      synced = true;
   }

   glthread_apply_thread_sched_policy(ctx, false);

   if (glthread->used) {
      glthread_finalize_batch(glthread, &glthread->stats.num_direct_items);

      /* Since glthread_unmarshal_batch changes the dispatch to direct,
       * restore it after it's done.
       */
      struct _glapi_table *dispatch = _glapi_get_dispatch();
      glthread_unmarshal_batch(next, NULL, 0);
      _glapi_set_dispatch(dispatch);

      /* It's not a sync because we don't enqueue partial batches, but
       * it would be a sync if we did. So count it anyway.
       */
      synced = true;
   }

   if (synced)
      p_atomic_inc(&glthread->stats.num_syncs);
}

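/**
 * Synchronize before a GL call that has to execute directly ("sync"
 * fallback); "func" names the call for the optional debug output below.
 */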
void
_mesa_glthread_finish_before(struct gl_context *ctx, const char *func)
{
   _mesa_glthread_finish(ctx);

   /* Uncomment this if you want to know where glthread syncs. */
   /*printf("fallback to sync: %s\n", func);*/
}

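/**
 * Report a GL error from code that may run while glthread is recording:
 * either marshal InternalSetError or format the message and call
 * _mesa_error directly.
 */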
void
_mesa_error_glthread_safe(struct gl_context *ctx, GLenum error, bool glthread,
                          const char *format, ...)
{
   if (glthread) {
      _mesa_marshal_InternalSetError(error);
   } else {
      char s[MAX_DEBUG_MESSAGE_LENGTH];
      va_list args;

      va_start(args, format);
      ASSERTED size_t len = vsnprintf(s, MAX_DEBUG_MESSAGE_LENGTH, format, args);
      va_end(args);

      /* Whoever calls _mesa_error should use shorter strings. */
      assert(len < MAX_DEBUG_MESSAGE_LENGTH);

      _mesa_error(ctx, error, "%s", s);
   }
}

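/**
 * Marshal an invalidation of the depth/stencil (ancillary) buffers.
 * Returns false if glthread is disabled.
 */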
bool
_mesa_glthread_invalidate_zsbuf(struct gl_context *ctx)
{
   struct glthread_state *glthread = &ctx->GLThread;
   if (!glthread->enabled)
      return false;
   _mesa_marshal_InternalInvalidateFramebufferAncillaryMESA();
   return true;
}