Lines Matching +full:shared +full:- +full:glapi
28 * In multicore systems, many applications end up CPU-bound with about half
50 struct gl_shared_state *shared = ctx->Shared; in glthread_update_global_locking()
53 simple_mtx_lock(&shared->Mutex); in glthread_update_global_locking()
59 bool lock_mutexes = shared->GLThread.LastContextSwitchTime + in glthread_update_global_locking()
60 shared->GLThread.NoLockDuration < current_time; in glthread_update_global_locking()
65 if (ctx != shared->GLThread.LastExecutingCtx) { in glthread_update_global_locking()
69 if (shared->GLThread.LastContextSwitchTime + in glthread_update_global_locking()
73 * time, reset the no-lock time to its initial state of only 1 in glthread_update_global_locking()
75 * multi-context loading of game content and shaders. in glthread_update_global_locking()
78 shared->GLThread.NoLockDuration = ONE_SECOND_IN_NS; in glthread_update_global_locking()
79 } else if (shared->GLThread.NoLockDuration < 32 * ONE_SECOND_IN_NS) { in glthread_update_global_locking()
80 /* Double the no-lock duration if we are transitioning from only in glthread_update_global_locking()
85 shared->GLThread.NoLockDuration *= 2; in glthread_update_global_locking()
94 * effectively resets the non-locking stopwatch to 0, so that multiple in glthread_update_global_locking()
97 shared->GLThread.LastExecutingCtx = ctx; in glthread_update_global_locking()
98 shared->GLThread.LastContextSwitchTime = current_time; in glthread_update_global_locking()
100 simple_mtx_unlock(&shared->Mutex); in glthread_update_global_locking()
102 ctx->GLThread.LockGlobalMutexes = lock_mutexes; in glthread_update_global_locking()
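
The lines above are the hits inside glthread_update_global_locking(). A minimal standalone sketch of the heuristic they outline: batch-wide locking of the shared mutexes is only permitted once NoLockDuration nanoseconds have passed with no context switch, the duration doubles on every contended switch up to 32 seconds, and it falls back to the initial 1 second after a long quiet period. The struct and function names below are made up for the sketch, and RESET_AFTER_NS is an assumed placeholder because the real threshold sits on a line that did not match the query.

    #include <stdbool.h>
    #include <stdint.h>

    #define ONE_SECOND_IN_NS 1000000000ull
    /* Assumed placeholder: how long without a context switch before the
     * back-off resets to its initial value. */
    #define RESET_AFTER_NS (120 * ONE_SECOND_IN_NS)

    struct lock_heuristic {
       const void *last_ctx;        /* last context that executed a batch */
       uint64_t last_switch_time;   /* time of the last context switch (ns) */
       uint64_t no_lock_duration;   /* how long batch-wide locking stays off */
    };

    /* Returns true if the caller may take the shared mutexes once per batch
     * instead of locking around every individual call. */
    bool update_global_locking(struct lock_heuristic *h, const void *ctx,
                               uint64_t now)
    {
       /* Batch-wide locking only after a quiet period with no switches. */
       bool lock_whole_batch = h->last_switch_time + h->no_lock_duration < now;

       if (ctx != h->last_ctx) {
          if (h->last_switch_time + RESET_AFTER_NS < now) {
             /* Long quiet period: drop back to the initial 1 second. */
             h->no_lock_duration = ONE_SECOND_IN_NS;
          } else if (h->no_lock_duration < 32 * ONE_SECOND_IN_NS) {
             /* Contexts keep alternating: back off exponentially, capped. */
             h->no_lock_duration *= 2;
          }
          /* Every switch restarts the "no context switch" stopwatch, so
           * frequent switches keep batch-wide locking disabled. */
          h->last_ctx = ctx;
          h->last_switch_time = now;
       }
       return lock_whole_batch;
    }
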
109 struct gl_context *ctx = batch->ctx; in glthread_unmarshal_batch()
111 unsigned used = batch->used; in glthread_unmarshal_batch()
112 uint64_t *buffer = batch->buffer; in glthread_unmarshal_batch()
113 struct gl_shared_state *shared = ctx->Shared; in glthread_unmarshal_batch()
115 /* Determine once every 64 batches whether shared mutexes should be locked. in glthread_unmarshal_batch()
118 * https://gitlab.freedesktop.org/mesa/mesa/-/issues/8910 in glthread_unmarshal_batch()
120 if (ctx->GLThread.GlobalLockUpdateBatchCounter++ % 64 == 0) in glthread_unmarshal_batch()
124 _mesa_glapi_set_dispatch(ctx->Dispatch.Current); in glthread_unmarshal_batch()
129 bool lock_mutexes = ctx->GLThread.LockGlobalMutexes; in glthread_unmarshal_batch()
131 _mesa_HashLockMutex(&shared->BufferObjects); in glthread_unmarshal_batch()
132 ctx->BufferObjectsLocked = true; in glthread_unmarshal_batch()
133 simple_mtx_lock(&shared->TexMutex); in glthread_unmarshal_batch()
134 ctx->TexturesLocked = true; in glthread_unmarshal_batch()
141 pos += _mesa_unmarshal_dispatch[cmd->cmd_id](ctx, cmd); in glthread_unmarshal_batch()
145 ctx->TexturesLocked = false; in glthread_unmarshal_batch()
146 simple_mtx_unlock(&shared->TexMutex); in glthread_unmarshal_batch()
147 ctx->BufferObjectsLocked = false; in glthread_unmarshal_batch()
148 _mesa_HashUnlockMutex(&shared->BufferObjects); in glthread_unmarshal_batch()
152 batch->used = 0; in glthread_unmarshal_batch()
154 unsigned batch_index = batch - ctx->GLThread.batches; in glthread_unmarshal_batch()
155 _mesa_glthread_signal_call(&ctx->GLThread.LastProgramChangeBatch, batch_index); in glthread_unmarshal_batch()
156 _mesa_glthread_signal_call(&ctx->GLThread.LastDListChangeBatchIndex, batch_index); in glthread_unmarshal_batch()
158 p_atomic_inc(&ctx->GLThread.stats.num_batches); in glthread_unmarshal_batch()
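
glthread_unmarshal_batch() is the consumer side of the marshalling queue: when LockGlobalMutexes allows it, the buffer-object hash and the texture mutex are taken once for the whole batch, and the batch itself is a packed array of uint64_t words walked by the pos += _mesa_unmarshal_dispatch[cmd->cmd_id](ctx, cmd) loop shown above, where each handler reports how many words it consumed. A self-contained sketch of that dispatch loop with invented command types; the batch-wide locking and the real Mesa command layout are omitted.

    #include <stdint.h>
    #include <stdio.h>

    /* Every packed command starts with a small header: cmd_id picks the
     * handler, and the handler returns the command size in uint64_t words. */
    struct cmd_base { uint16_t cmd_id; uint16_t num_words; };

    enum { CMD_CLEAR, CMD_DRAW, NUM_CMDS };

    typedef unsigned (*unmarshal_fn)(const struct cmd_base *cmd);

    static unsigned unmarshal_clear(const struct cmd_base *cmd)
    { puts("clear"); return cmd->num_words; }

    static unsigned unmarshal_draw(const struct cmd_base *cmd)
    { puts("draw");  return cmd->num_words; }

    static const unmarshal_fn dispatch[NUM_CMDS] = {
       [CMD_CLEAR] = unmarshal_clear,
       [CMD_DRAW]  = unmarshal_draw,
    };

    /* Walk the batch: pos advances by whatever each handler consumed. */
    static void unmarshal_batch(const uint64_t *buffer, unsigned used)
    {
       unsigned pos = 0;
       while (pos < used) {
          const struct cmd_base *cmd = (const struct cmd_base *)&buffer[pos];
          pos += dispatch[cmd->cmd_id](cmd);
       }
    }

    int main(void)
    {
       uint64_t buf[2] = {0};
       ((struct cmd_base *)&buf[0])->cmd_id = CMD_CLEAR;
       ((struct cmd_base *)&buf[0])->num_words = 1;
       ((struct cmd_base *)&buf[1])->cmd_id = CMD_DRAW;
       ((struct cmd_base *)&buf[1])->num_words = 1;
       unmarshal_batch(buf, 2);
       return 0;
    }
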
164 struct glthread_state *glthread = &ctx->GLThread; in glthread_apply_thread_sched_policy()
166 if (!glthread->thread_sched_enabled) in glthread_apply_thread_sched_policy()
172 if (initialization || ++glthread->pin_thread_counter % 128 == 0) { in glthread_apply_thread_sched_policy()
176 util_thread_sched_apply_policy(glthread->queue.threads[0], in glthread_apply_thread_sched_policy()
178 &glthread->thread_sched_state)) { in glthread_apply_thread_sched_policy()
180 ctx->pipe->set_context_param(ctx->pipe, in glthread_apply_thread_sched_policy()
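
glthread_apply_thread_sched_policy() is deliberately cheap on the hot path: it re-applies the scheduler policy only at initialization time and then on every 128th call, via a wrap-around counter. glthread_unmarshal_batch() uses the same trick for the locking heuristic, re-evaluating it every 64 batches. The pattern in isolation, with the period and names chosen arbitrarily here:

    #include <stdbool.h>

    #define REAPPLY_PERIOD 128   /* the listing uses 128 calls / 64 batches */

    /* True on the first call and then once every REAPPLY_PERIOD calls, so
     * the expensive re-check stays off the common path. */
    bool should_reapply(unsigned *counter, bool initialization)
    {
       return initialization || ++*counter % REAPPLY_PERIOD == 0;
    }
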
192 st_set_background_context(ctx, &ctx->GLThread.stats); in glthread_thread_initialization()
213 struct pipe_screen *screen = ctx->screen; in _mesa_glthread_init()
214 struct glthread_state *glthread = &ctx->GLThread; in _mesa_glthread_init()
215 assert(!glthread->enabled); in _mesa_glthread_init()
217 if (!screen->caps.map_unsynchronized_thread_safe || in _mesa_glthread_init()
218 !screen->caps.allow_mapped_buffers_during_execution) in _mesa_glthread_init()
221 if (!util_queue_init(&glthread->queue, "gl", MARSHAL_MAX_BATCHES - 2, in _mesa_glthread_init()
226 _mesa_InitHashTable(&glthread->VAOs, ctx->Shared->ReuseGLNames); in _mesa_glthread_init()
227 _mesa_glthread_reset_vao(&glthread->DefaultVAO); in _mesa_glthread_init()
228 glthread->CurrentVAO = &glthread->DefaultVAO; in _mesa_glthread_init()
230 ctx->MarshalExec = _mesa_alloc_dispatch_table(true); in _mesa_glthread_init()
231 if (!ctx->MarshalExec) { in _mesa_glthread_init()
232 _mesa_DeinitHashTable(&glthread->VAOs, NULL, NULL); in _mesa_glthread_init()
233 util_queue_destroy(&glthread->queue); in _mesa_glthread_init()
237 _mesa_glthread_init_dispatch(ctx, ctx->MarshalExec); in _mesa_glthread_init()
238 _mesa_init_pixelstore_attrib(ctx, &glthread->Unpack); in _mesa_glthread_init()
241 glthread->batches[i].ctx = ctx; in _mesa_glthread_init()
242 util_queue_fence_init(&glthread->batches[i].fence); in _mesa_glthread_init()
244 glthread->next_batch = &glthread->batches[glthread->next]; in _mesa_glthread_init()
245 glthread->used = 0; in _mesa_glthread_init()
246 glthread->stats.queue = &glthread->queue; in _mesa_glthread_init()
248 _mesa_glthread_init_call_fence(&glthread->LastProgramChangeBatch); in _mesa_glthread_init()
249 _mesa_glthread_init_call_fence(&glthread->LastDListChangeBatchIndex); in _mesa_glthread_init()
256 util_queue_add_job(&glthread->queue, ctx, &fence, in _mesa_glthread_init()
261 glthread->thread_sched_enabled = ctx->pipe->set_context_param && in _mesa_glthread_init()
263 util_thread_scheduler_init_state(&glthread->thread_sched_state); in _mesa_glthread_init()
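
The _mesa_glthread_init() hits show a staged setup: the queue is created first, then the VAO hash table, and only then the marshalling dispatch table; if that last allocation fails, the hash table and the queue that already exist are torn down again before returning. A reduced sketch of that unwind-on-failure shape, with stand-in types and no relation to the real util_queue or hash APIs:

    #include <stdbool.h>
    #include <stdlib.h>

    /* Stand-ins for the resources created in _mesa_glthread_init(). */
    struct queue { int placeholder; };
    struct hash  { int placeholder; };

    static bool queue_init(struct queue *q)    { (void)q; return true; }
    static void queue_destroy(struct queue *q) { (void)q; }
    static void hash_init(struct hash *h)      { (void)h; }
    static void hash_deinit(struct hash *h)    { (void)h; }

    struct glthread_like {
       struct queue queue;
       struct hash  vaos;
       void        *marshal_exec;   /* dispatch table, allocated last */
    };

    /* If a later step fails, everything that was already created has to be
     * torn down again before giving up, the same shape as the
     * ctx->MarshalExec failure path above. */
    bool glthread_like_init(struct glthread_like *g)
    {
       if (!queue_init(&g->queue))
          return false;

       hash_init(&g->vaos);

       g->marshal_exec = malloc(4096);   /* placeholder allocation */
       if (!g->marshal_exec) {
          hash_deinit(&g->vaos);
          queue_destroy(&g->queue);
          return false;
       }
       return true;
    }
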
276 struct glthread_state *glthread = &ctx->GLThread; in _mesa_glthread_destroy()
280 if (util_queue_is_initialized(&glthread->queue)) { in _mesa_glthread_destroy()
281 util_queue_destroy(&glthread->queue); in _mesa_glthread_destroy()
284 util_queue_fence_destroy(&glthread->batches[i].fence); in _mesa_glthread_destroy()
286 _mesa_DeinitHashTable(&glthread->VAOs, free_vao, NULL); in _mesa_glthread_destroy()
293 if (ctx->GLThread.enabled || in _mesa_glthread_enable()
294 ctx->Dispatch.Current == ctx->Dispatch.ContextLost || in _mesa_glthread_enable()
295 ctx->GLThread.DebugOutputSynchronous) in _mesa_glthread_enable()
298 ctx->GLThread.enabled = true; in _mesa_glthread_enable()
299 ctx->GLApi = ctx->MarshalExec; in _mesa_glthread_enable()
302 ctx->st->pin_thread_counter = ST_THREAD_SCHEDULER_DISABLED; in _mesa_glthread_enable()
305 if (_mesa_glapi_get_dispatch() == ctx->Dispatch.Current) { in _mesa_glthread_enable()
306 _mesa_glapi_set_dispatch(ctx->GLApi); in _mesa_glthread_enable()
312 if (!ctx->GLThread.enabled) in _mesa_glthread_disable()
317 ctx->GLThread.enabled = false; in _mesa_glthread_disable()
318 ctx->GLApi = ctx->Dispatch.Current; in _mesa_glthread_disable()
320 /* Re-enable thread scheduling in st/mesa when glthread is disabled. */ in _mesa_glthread_disable()
321 if (ctx->pipe->set_context_param && util_thread_scheduler_enabled()) in _mesa_glthread_disable()
322 ctx->st->pin_thread_counter = 0; in _mesa_glthread_disable()
325 if (_mesa_glapi_get_dispatch() == ctx->MarshalExec) { in _mesa_glthread_disable()
326 _mesa_glapi_set_dispatch(ctx->GLApi); in _mesa_glthread_disable()
329 /* Unbind VBOs in all VAOs that glthread bound for non-VBO vertex uploads in _mesa_glthread_disable()
332 if (ctx->API != API_OPENGL_CORE) in _mesa_glthread_disable()
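
_mesa_glthread_enable() and _mesa_glthread_disable() mostly just repoint ctx->GLApi at either the marshalling table (MarshalExec) or the direct table (Dispatch.Current), and they only touch the thread-local glapi dispatch when it currently points at the table this context had installed, which is what the _mesa_glapi_get_dispatch() comparisons above guard. A heavily reduced model of that table swap; the table contents and names are invented:

    #include <stdbool.h>

    /* "direct" entries call straight into the driver; "marshal" entries only
     * record the call for the worker thread to replay later. */
    struct dispatch { void (*Clear)(void); };

    static void direct_clear(void)  { /* execute immediately */ }
    static void marshal_clear(void) { /* append a command to the batch */ }

    static const struct dispatch direct_table  = { direct_clear };
    static const struct dispatch marshal_table = { marshal_clear };

    struct ctx_like {
       bool enabled;
       const struct dispatch *glapi;   /* what the application calls into */
    };

    void glthread_like_enable(struct ctx_like *ctx)
    {
       if (ctx->enabled)
          return;
       ctx->enabled = true;
       ctx->glapi = &marshal_table;    /* ctx->GLApi = ctx->MarshalExec */
    }

    void glthread_like_disable(struct ctx_like *ctx)
    {
       if (!ctx->enabled)
          return;
       ctx->enabled = false;
       ctx->glapi = &direct_table;     /* ctx->GLApi = ctx->Dispatch.Current */
    }
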
340 struct glthread_batch *next = glthread->next_batch; in glthread_finalize_batch()
344 (struct marshal_cmd_base *)&next->buffer[glthread->used]; in glthread_finalize_batch()
345 last->cmd_id = NUM_DISPATCH_CMD; in glthread_finalize_batch()
347 p_atomic_add(num_items_counter, glthread->used); in glthread_finalize_batch()
348 next->used = glthread->used; in glthread_finalize_batch()
349 glthread->used = 0; in glthread_finalize_batch()
351 glthread->LastCallList = NULL; in glthread_finalize_batch()
352 glthread->LastBindBuffer1 = NULL; in glthread_finalize_batch()
353 glthread->LastBindBuffer2 = NULL; in glthread_finalize_batch()
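
glthread_finalize_batch() is the producer-side handoff: a terminating command id (NUM_DISPATCH_CMD) is stamped at index used, one slot past the last real command and not included in the published count, the fill level is copied into the batch, and the producer's write cursor plus its cached "last call" pointers are reset. A small sketch of that handoff; the terminator value is arbitrary and the sketch simply assumes the buffer always keeps one spare slot free for it:

    #include <stdint.h>

    #define TERMINATOR_CMD 0xffffu   /* stand-in for NUM_DISPATCH_CMD */
    #define BATCH_WORDS    1024

    struct cmd_base { uint16_t cmd_id; uint16_t num_words; };

    struct batch {
       uint64_t buffer[BATCH_WORDS];
       unsigned used;                 /* words published to the consumer */
    };

    /* Stamp the terminator after the last real command (it is written at
     * index *producer_used but not counted), publish the size and reset the
     * producer's write cursor for the next batch. */
    void finalize_batch(struct batch *next, unsigned *producer_used)
    {
       struct cmd_base *last = (struct cmd_base *)&next->buffer[*producer_used];
       last->cmd_id = TERMINATOR_CMD;

       next->used = *producer_used;
       *producer_used = 0;
    }
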
359 struct glthread_state *glthread = &ctx->GLThread; in _mesa_glthread_flush_batch()
360 if (!glthread->enabled) in _mesa_glthread_flush_batch()
363 if (ctx->Dispatch.Current == ctx->Dispatch.ContextLost) { in _mesa_glthread_flush_batch()
368 if (!glthread->used) in _mesa_glthread_flush_batch()
372 glthread_finalize_batch(glthread, &glthread->stats.num_offloaded_items); in _mesa_glthread_flush_batch()
374 struct glthread_batch *next = glthread->next_batch; in _mesa_glthread_flush_batch()
376 util_queue_add_job(&glthread->queue, next, &next->fence, in _mesa_glthread_flush_batch()
378 glthread->last = glthread->next; in _mesa_glthread_flush_batch()
379 glthread->next = (glthread->next + 1) % MARSHAL_MAX_BATCHES; in _mesa_glthread_flush_batch()
380 glthread->next_batch = &glthread->batches[glthread->next]; in _mesa_glthread_flush_batch()
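
_mesa_glthread_flush_batch() submits the batch that is currently being filled and then advances through a fixed ring of MARSHAL_MAX_BATCHES batches: the submitted one becomes "last" (its fence is the one _mesa_glthread_finish() below waits on) and "next" moves forward modulo the ring size. A sketch of just that ring bookkeeping, with submission and fences reduced to a flag:

    #include <stdbool.h>

    #define MAX_BATCHES 8   /* stand-in for MARSHAL_MAX_BATCHES */

    struct ring_batch { unsigned used; bool submitted; };

    struct batch_ring {
       struct ring_batch batches[MAX_BATCHES];
       unsigned last;   /* most recently submitted batch */
       unsigned next;   /* batch currently being filled */
    };

    /* Hand the current batch to the worker and advance the ring the same
     * way the lines above do: last = next, then next wraps around. */
    void flush(struct batch_ring *r)
    {
       struct ring_batch *next = &r->batches[r->next];
       if (!next->used)
          return;                 /* nothing was recorded */

       next->submitted = true;    /* util_queue_add_job() in the real code */

       r->last = r->next;
       r->next = (r->next + 1) % MAX_BATCHES;
    }
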
392 struct glthread_state *glthread = &ctx->GLThread; in _mesa_glthread_finish()
393 if (!glthread->enabled) in _mesa_glthread_finish()
401 if (u_thread_is_self(glthread->queue.threads[0])) in _mesa_glthread_finish()
404 struct glthread_batch *last = &glthread->batches[glthread->last]; in _mesa_glthread_finish()
405 struct glthread_batch *next = glthread->next_batch; in _mesa_glthread_finish()
408 if (!util_queue_fence_is_signalled(&last->fence)) { in _mesa_glthread_finish()
409 util_queue_fence_wait(&last->fence); in _mesa_glthread_finish()
415 if (glthread->used) { in _mesa_glthread_finish()
416 glthread_finalize_batch(glthread, &glthread->stats.num_direct_items); in _mesa_glthread_finish()
432 p_atomic_inc(&glthread->stats.num_syncs); in _mesa_glthread_finish()
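
_mesa_glthread_finish() is the synchronization point: a call arriving on the worker thread itself returns immediately (there is nothing to wait for from that side), otherwise the fence of the last submitted batch is waited on if it has not signalled yet, and any commands that were recorded but never flushed are finalized and executed directly in the calling thread (the num_direct_items counter above). A simplified shape of that flow; the fence and the direct-execution path are stubs, and counting every call as a sync is a simplification:

    #include <pthread.h>
    #include <stdbool.h>

    struct sync_state {
       pthread_t worker;          /* the glthread-style worker thread */
       bool      last_signalled;  /* fence of the last submitted batch */
       unsigned  pending;         /* recorded but not yet flushed commands */
       unsigned  num_syncs;
    };

    static void wait_for_last_batch(struct sync_state *s)
    { s->last_signalled = true; }           /* util_queue_fence_wait() stub */

    static void run_pending_directly(struct sync_state *s)
    { s->pending = 0; }                     /* direct unmarshal stub */

    void finish(struct sync_state *s)
    {
       /* Never wait on yourself: the u_thread_is_self() check above. */
       if (pthread_equal(pthread_self(), s->worker))
          return;

       if (!s->last_signalled)
          wait_for_last_batch(s);

       if (s->pending)
          run_pending_directly(s);

       s->num_syncs++;
    }
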
468 struct glthread_state *glthread = &ctx->GLThread; in _mesa_glthread_invalidate_zsbuf()
469 if (!glthread->enabled) in _mesa_glthread_invalidate_zsbuf()
480 ctx->GLThread.Unpack.SwapBytes = !!param; in _mesa_glthread_PixelStorei()
483 ctx->GLThread.Unpack.LsbFirst = !!param; in _mesa_glthread_PixelStorei()
487 ctx->GLThread.Unpack.RowLength = param; in _mesa_glthread_PixelStorei()
491 ctx->GLThread.Unpack.ImageHeight = param; in _mesa_glthread_PixelStorei()
495 ctx->GLThread.Unpack.SkipPixels = param; in _mesa_glthread_PixelStorei()
499 ctx->GLThread.Unpack.SkipRows = param; in _mesa_glthread_PixelStorei()
503 ctx->GLThread.Unpack.SkipImages = param; in _mesa_glthread_PixelStorei()
507 ctx->GLThread.Unpack.Alignment = param; in _mesa_glthread_PixelStorei()
511 ctx->GLThread.Unpack.CompressedBlockWidth = param; in _mesa_glthread_PixelStorei()
515 ctx->GLThread.Unpack.CompressedBlockHeight = param; in _mesa_glthread_PixelStorei()
519 ctx->GLThread.Unpack.CompressedBlockDepth = param; in _mesa_glthread_PixelStorei()
523 ctx->GLThread.Unpack.CompressedBlockSize = param; in _mesa_glthread_PixelStorei()
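
_mesa_glthread_PixelStorei() keeps a shadow copy of the unpack state in ctx->GLThread.Unpack rather than forwarding straight to the context. A plausible reason, assumed here rather than shown by the matches, is that the marshalling code can then size client pixel data without synchronizing with the worker. For plain byte-sized components the row stride follows the usual GL unpack rules, roughly:

    #include <stdint.h>

    struct unpack_like {
       int RowLength;   /* GL_UNPACK_ROW_LENGTH, 0 means use the image width */
       int Alignment;   /* GL_UNPACK_ALIGNMENT: 1, 2, 4 or 8 */
    };

    /* Row stride in bytes for an uncompressed image with 1-byte components:
     * the row-length override wins over the width, and every row is padded
     * up to the unpack alignment. Packed and wide-component formats need the
     * full rules from the GL spec. */
    int32_t row_stride(const struct unpack_like *u, int width,
                       int bytes_per_pixel)
    {
       int row_pixels = u->RowLength > 0 ? u->RowLength : width;
       int32_t row_bytes = (int32_t)row_pixels * bytes_per_pixel;

       return (row_bytes + u->Alignment - 1) / u->Alignment * u->Alignment;
    }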