/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file glthread.c
 *
 * Support functions for the glthread feature of Mesa.
 *
 * In multicore systems, many applications end up CPU-bound with about half
 * their time spent inside their rendering thread and half inside Mesa.  To
 * alleviate this, we put a shim layer in Mesa at the GL dispatch level that
 * quickly logs the GL commands to a buffer to be processed by a worker
 * thread.
 */

#include "main/mtypes.h"
#include "main/glthread.h"
#include "main/glthread_marshal.h"
#include "main/hash.h"
#include "util/u_atomic.h"
#include "util/u_thread.h"
#include "util/u_cpu_detect.h"

#include "state_tracker/st_context.h"

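/**
 * Execute all commands recorded in a batch.  This runs either in the worker
 * thread (as a util_queue job) or directly in the application thread when
 * glthread syncs or when the debug path in _mesa_glthread_flush_batch is
 * enabled.
 */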
static void
glthread_unmarshal_batch(void *job, void *gdata, int thread_index)
{
   struct glthread_batch *batch = (struct glthread_batch*)job;
   struct gl_context *ctx = batch->ctx;
   unsigned pos = 0;
   unsigned used = batch->used;
   uint64_t *buffer = batch->buffer;
   const uint64_t *last = &buffer[used];

   _glapi_set_dispatch(ctx->CurrentServerDispatch);

   _mesa_HashLockMutex(ctx->Shared->BufferObjects);
   ctx->BufferObjectsLocked = true;
   simple_mtx_lock(&ctx->Shared->TexMutex);
   ctx->TexturesLocked = true;

   while (pos < used) {
      const struct marshal_cmd_base *cmd =
         (const struct marshal_cmd_base *)&buffer[pos];

      pos += _mesa_unmarshal_dispatch[cmd->cmd_id](ctx, cmd, last);
   }

   ctx->TexturesLocked = false;
   simple_mtx_unlock(&ctx->Shared->TexMutex);
   ctx->BufferObjectsLocked = false;
   _mesa_HashUnlockMutex(ctx->Shared->BufferObjects);

   assert(pos == used);
   batch->used = 0;

   unsigned batch_index = batch - ctx->GLThread.batches;
   /* Atomically set this to -1 if it's equal to batch_index. */
   p_atomic_cmpxchg(&ctx->GLThread.LastProgramChangeBatch, batch_index, -1);
   p_atomic_cmpxchg(&ctx->GLThread.LastDListChangeBatchIndex, batch_index, -1);
}

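/**
 * One-time worker thread setup: make the context usable from a background
 * thread and make it current for GL dispatch in this thread.
 */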
static void
glthread_thread_initialization(void *job, void *gdata, int thread_index)
{
   struct gl_context *ctx = (struct gl_context*)job;

   st_set_background_context(ctx, &ctx->GLThread.stats);
   _glapi_set_context(ctx);
}

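/**
 * Enable glthread for a context: create the worker queue and the marshal
 * dispatch tables, initialize the batches and the glthread VAO state, and
 * run the thread initialization job in the worker thread.
 */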
void
_mesa_glthread_init(struct gl_context *ctx)
{
   struct glthread_state *glthread = &ctx->GLThread;

   assert(!glthread->enabled);

   if (!util_queue_init(&glthread->queue, "gl", MARSHAL_MAX_BATCHES - 2,
                        1, 0, NULL)) {
      return;
   }

   glthread->VAOs = _mesa_NewHashTable();
   if (!glthread->VAOs) {
      util_queue_destroy(&glthread->queue);
      return;
   }

   _mesa_glthread_reset_vao(&glthread->DefaultVAO);
   glthread->CurrentVAO = &glthread->DefaultVAO;

   if (!_mesa_create_marshal_tables(ctx)) {
      _mesa_DeleteHashTable(glthread->VAOs);
      util_queue_destroy(&glthread->queue);
      return;
   }

   for (unsigned i = 0; i < MARSHAL_MAX_BATCHES; i++) {
      glthread->batches[i].ctx = ctx;
      util_queue_fence_init(&glthread->batches[i].fence);
   }
   glthread->next_batch = &glthread->batches[glthread->next];
   glthread->used = 0;

   glthread->enabled = true;
   glthread->stats.queue = &glthread->queue;

   glthread->SupportsBufferUploads =
      ctx->Const.BufferCreateMapUnsynchronizedThreadSafe &&
      ctx->Const.AllowMappedBuffersDuringExecution;

   /* If the draw start index is non-zero, glthread can upload to offset 0,
    * which means the attrib offset has to be -(first * stride).
    * So require signed vertex buffer offsets.
    */
   glthread->SupportsNonVBOUploads = glthread->SupportsBufferUploads &&
                                     ctx->Const.VertexBufferOffsetIsInt32;

   ctx->CurrentClientDispatch = ctx->MarshalExec;

   glthread->LastDListChangeBatchIndex = -1;

   /* Execute the thread initialization function in the thread. */
   struct util_queue_fence fence;
   util_queue_fence_init(&fence);
   util_queue_add_job(&glthread->queue, ctx, &fence,
                      glthread_thread_initialization, NULL, 0);
   util_queue_fence_wait(&fence);
   util_queue_fence_destroy(&fence);
}

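/* Hash table callback that frees one glthread VAO entry. */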
static void
free_vao(void *data, UNUSED void *userData)
{
   free(data);
}

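/**
 * Disable glthread: wait for pending batches, destroy the worker queue and
 * per-batch fences, free the VAO table, and switch the context back to
 * direct (unmarshaled) dispatch.
 */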
void
_mesa_glthread_destroy(struct gl_context *ctx, const char *reason)
{
   struct glthread_state *glthread = &ctx->GLThread;

   if (!glthread->enabled)
      return;

   if (reason)
      _mesa_debug(ctx, "glthread destroy reason: %s\n", reason);

   _mesa_glthread_finish(ctx);
   util_queue_destroy(&glthread->queue);

   for (unsigned i = 0; i < MARSHAL_MAX_BATCHES; i++)
      util_queue_fence_destroy(&glthread->batches[i].fence);

   _mesa_HashDeleteAll(glthread->VAOs, free_vao, NULL);
   _mesa_DeleteHashTable(glthread->VAOs);

   ctx->GLThread.enabled = false;
   ctx->CurrentClientDispatch = ctx->CurrentServerDispatch;

   /* Update the dispatch only if the context is current. */
   if (_glapi_get_dispatch() == ctx->MarshalExec) {
      _glapi_set_dispatch(ctx->CurrentClientDispatch);
   }
}

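/**
 * Submit the batch that is currently being filled to the worker thread and
 * move on to the next batch slot.  Periodically re-pins the worker thread
 * to the L3 cache (Zen CCX) that the application thread is running on.
 */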
void
_mesa_glthread_flush_batch(struct gl_context *ctx)
{
   struct glthread_state *glthread = &ctx->GLThread;
   if (!glthread->enabled)
      return;

   if (ctx->CurrentServerDispatch == ctx->ContextLost) {
      _mesa_glthread_destroy(ctx, "context lost");
      return;
   }

   if (!glthread->used)
      return; /* the batch is empty */

   /* Pin threads regularly to the same Zen CCX that the main thread is
    * running on. The main thread can move between CCXs.
    */
   if (util_get_cpu_caps()->num_L3_caches > 1 &&
       /* driver support */
       ctx->pipe->set_context_param &&
       ++glthread->pin_thread_counter % 128 == 0) {
      int cpu = util_get_current_cpu();

      if (cpu >= 0) {
         uint16_t L3_cache = util_get_cpu_caps()->cpu_to_L3[cpu];
         if (L3_cache != U_CPU_INVALID_L3) {
            util_set_thread_affinity(glthread->queue.threads[0],
                                     util_get_cpu_caps()->L3_affinity_mask[L3_cache],
                                     NULL, util_get_cpu_caps()->num_cpu_mask_bits);
            ctx->pipe->set_context_param(ctx->pipe,
                                         PIPE_CONTEXT_PARAM_PIN_THREADS_TO_L3_CACHE,
                                         L3_cache);
         }
      }
   }

   struct glthread_batch *next = glthread->next_batch;

   /* Debug: execute the batch immediately from this thread.
    *
    * Note that glthread_unmarshal_batch() changes the dispatch table so we'll
    * need to restore it when it returns.
    */
   if (false) {
      glthread_unmarshal_batch(next, NULL, 0);
      _glapi_set_dispatch(ctx->CurrentClientDispatch);
      return;
   }

   p_atomic_add(&glthread->stats.num_offloaded_items, glthread->used);
   next->used = glthread->used;

   util_queue_add_job(&glthread->queue, next, &next->fence,
                      glthread_unmarshal_batch, NULL, 0);
   glthread->last = glthread->next;
   glthread->next = (glthread->next + 1) % MARSHAL_MAX_BATCHES;
   glthread->next_batch = &glthread->batches[glthread->next];
   glthread->used = 0;
}

/**
 * Waits until all pending batches have been unmarshaled.
 *
 * This can be used by the main thread to synchronize access to the context,
 * since the worker thread will be idle after this.
 */
void
_mesa_glthread_finish(struct gl_context *ctx)
{
   struct glthread_state *glthread = &ctx->GLThread;
   if (!glthread->enabled)
      return;

   /* If this is called from the worker thread, then we've hit a path that
    * might be called from either the main thread or the worker (such as some
    * dri interface entrypoints), in which case we don't need to actually
    * synchronize against ourself.
    */
   if (u_thread_is_self(glthread->queue.threads[0]))
      return;

   struct glthread_batch *last = &glthread->batches[glthread->last];
   struct glthread_batch *next = glthread->next_batch;
   bool synced = false;

   if (!util_queue_fence_is_signalled(&last->fence)) {
      util_queue_fence_wait(&last->fence);
      synced = true;
   }

   if (glthread->used) {
      p_atomic_add(&glthread->stats.num_direct_items, glthread->used);
      next->used = glthread->used;
      glthread->used = 0;

      /* Since glthread_unmarshal_batch changes the dispatch to direct,
       * restore it after it's done.
       */
      struct _glapi_table *dispatch = _glapi_get_dispatch();
      glthread_unmarshal_batch(next, NULL, 0);
      _glapi_set_dispatch(dispatch);

      /* It's not a sync because we don't enqueue partial batches, but
       * it would be a sync if we did. So count it anyway.
       */
      synced = true;
   }

   if (synced)
      p_atomic_inc(&glthread->stats.num_syncs);
}

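/**
 * Synchronize before a call that must execute directly in the application
 * thread; "func" names the caller for debugging the sync fallbacks.
 */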
void
_mesa_glthread_finish_before(struct gl_context *ctx, const char *func)
{
   _mesa_glthread_finish(ctx);

   /* Uncomment this if you want to know where glthread syncs. */
   /*printf("fallback to sync: %s\n", func);*/
}

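/**
 * Record a GL error in a way that is safe for both code paths: if "glthread"
 * is true (called from the marshal layer), queue the error via
 * InternalSetError; otherwise format the message and call _mesa_error
 * directly.
 */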
void
_mesa_error_glthread_safe(struct gl_context *ctx, GLenum error, bool glthread,
                          const char *format, ...)
{
   if (glthread) {
      _mesa_marshal_InternalSetError(error);
   } else {
      char s[MAX_DEBUG_MESSAGE_LENGTH];
      va_list args;

      va_start(args, format);
      ASSERTED size_t len = vsnprintf(s, MAX_DEBUG_MESSAGE_LENGTH, format, args);
      va_end(args);

      /* Whoever calls _mesa_error should use shorter strings. */
      assert(len < MAX_DEBUG_MESSAGE_LENGTH);

      _mesa_error(ctx, error, "%s", s);
   }
}