/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file glthread.c
 *
 * Support functions for the glthread feature of Mesa.
 *
 * In multicore systems, many applications end up CPU-bound with about half
 * their time spent inside their rendering thread and half inside Mesa.  To
 * alleviate this, we put a shim layer in Mesa at the GL dispatch level that
 * quickly logs the GL commands to a buffer to be processed by a worker
 * thread.
 */
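/*
 * Illustrative sketch only (not the actual generated code): each logged GL
 * call is stored in the batch buffer as a small command struct that begins
 * with struct marshal_cmd_base, roughly:
 *
 *    struct marshal_cmd_Enable {
 *       struct marshal_cmd_base cmd_base;  // cmd_id and cmd_size
 *       GLenum cap;                        // the call's parameters follow
 *    };
 *
 * The application thread appends such structs to the current batch, and the
 * worker thread replays them through _mesa_unmarshal_dispatch[cmd_id], as in
 * glthread_unmarshal_batch() below.
 */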

#include "main/mtypes.h"
#include "main/glthread.h"
#include "main/glthread_marshal.h"
#include "main/hash.h"
#include "util/u_atomic.h"
#include "util/u_thread.h"
#include "util/u_cpu_detect.h"


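/* Executes (unmarshals) all commands accumulated in a batch.  This is the
 * worker-thread job, but it is also called directly when glthread has to
 * execute a batch synchronously (see _mesa_glthread_finish).
 */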
static void
glthread_unmarshal_batch(void *job, int thread_index)
{
   struct glthread_batch *batch = (struct glthread_batch*)job;
   struct gl_context *ctx = batch->ctx;
   int pos = 0;
   int used = batch->used;
   uint8_t *buffer = batch->buffer;

   _glapi_set_dispatch(ctx->CurrentServerDispatch);

   while (pos < used) {
      const struct marshal_cmd_base *cmd =
         (const struct marshal_cmd_base *)&buffer[pos];

      _mesa_unmarshal_dispatch[cmd->cmd_id](ctx, cmd);
      pos += cmd->cmd_size;
   }

   assert(pos == used);
   batch->used = 0;
}

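/* Runs once on the worker thread when glthread is initialized: asks the
 * driver to set up a background context for this thread and binds the
 * gl_context to the thread with _glapi_set_context().
 */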
static void
glthread_thread_initialization(void *job, int thread_index)
{
   struct gl_context *ctx = (struct gl_context*)job;

   ctx->Driver.SetBackgroundContext(ctx, &ctx->GLThread.stats);
   _glapi_set_context(ctx);
}

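/* Enables glthread for a context: creates the worker queue, the VAO tracking
 * hash table and the marshalling dispatch table, then runs the one-time
 * thread initialization on the worker.  On any failure, glthread simply
 * stays disabled.
 */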
void
_mesa_glthread_init(struct gl_context *ctx)
{
   struct glthread_state *glthread = &ctx->GLThread;

   assert(!glthread->enabled);

   if (!util_queue_init(&glthread->queue, "gl", MARSHAL_MAX_BATCHES - 2,
                        1, 0)) {
      return;
   }

   glthread->VAOs = _mesa_NewHashTable();
   if (!glthread->VAOs) {
      util_queue_destroy(&glthread->queue);
      return;
   }

   _mesa_glthread_reset_vao(&glthread->DefaultVAO);
   glthread->CurrentVAO = &glthread->DefaultVAO;

   ctx->MarshalExec = _mesa_create_marshal_table(ctx);
   if (!ctx->MarshalExec) {
      _mesa_DeleteHashTable(glthread->VAOs);
      util_queue_destroy(&glthread->queue);
      return;
   }

   for (unsigned i = 0; i < MARSHAL_MAX_BATCHES; i++) {
      glthread->batches[i].ctx = ctx;
      util_queue_fence_init(&glthread->batches[i].fence);
   }
   glthread->next_batch = &glthread->batches[glthread->next];

   glthread->enabled = true;
   glthread->stats.queue = &glthread->queue;

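   /* Buffer uploads from the application thread are only safe if the driver
    * can create and map buffers unsynchronized in a thread-safe way and
    * allows mapped buffers to stay mapped while commands execute.
    */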
   glthread->SupportsBufferUploads =
      ctx->Const.BufferCreateMapUnsynchronizedThreadSafe &&
      ctx->Const.AllowMappedBuffersDuringExecution;

   /* If the draw start index is non-zero, glthread can upload to offset 0,
    * which means the attrib offset has to be -(first * stride).
    * So signed vertex buffer offsets are required.
    */
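   /* Example with made-up numbers: first = 100 and stride = 16 would place
    * the upload at buffer offset 0 with an attrib offset of
    * -(100 * 16) = -1600, hence the signed (int32) offset requirement below.
    */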
   glthread->SupportsNonVBOUploads = glthread->SupportsBufferUploads &&
                                     ctx->Const.VertexBufferOffsetIsInt32;

   ctx->CurrentClientDispatch = ctx->MarshalExec;

   /* Execute the thread initialization function in the thread. */
   struct util_queue_fence fence;
   util_queue_fence_init(&fence);
   util_queue_add_job(&glthread->queue, ctx, &fence,
                      glthread_thread_initialization, NULL, 0);
   util_queue_fence_wait(&fence);
   util_queue_fence_destroy(&fence);
}

static void
free_vao(void *data, UNUSED void *userData)
{
   free(data);
}

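/* Disables glthread and releases its resources: waits for the worker to go
 * idle, destroys the queue and batch fences, frees the VAO tracking table
 * and restores the direct dispatch table.
 */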
void
_mesa_glthread_destroy(struct gl_context *ctx)
{
   struct glthread_state *glthread = &ctx->GLThread;

   if (!glthread->enabled)
      return;

   _mesa_glthread_finish(ctx);
   util_queue_destroy(&glthread->queue);

   for (unsigned i = 0; i < MARSHAL_MAX_BATCHES; i++)
      util_queue_fence_destroy(&glthread->batches[i].fence);

   _mesa_HashDeleteAll(glthread->VAOs, free_vao, NULL);
   _mesa_DeleteHashTable(glthread->VAOs);

   ctx->GLThread.enabled = false;

   _mesa_glthread_restore_dispatch(ctx, "destroy");
}

void
_mesa_glthread_restore_dispatch(struct gl_context *ctx, const char *func)
{
   /* Remove ourselves from the dispatch table, unless another context/thread
    * has already installed a new dispatch table.
    *
    * Typically glXMakeCurrent will bind a new context (installing a new
    * table) and the old context might then be deleted.
    */
   if (_glapi_get_dispatch() == ctx->MarshalExec) {
       ctx->CurrentClientDispatch = ctx->CurrentServerDispatch;
       _glapi_set_dispatch(ctx->CurrentClientDispatch);
#if 0
       printf("glthread disabled: %s\n", func);
#endif
   }
}

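/* Synchronizes with the worker thread and switches the application back to
 * the direct (non-marshalling) dispatch table.
 */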
void
_mesa_glthread_disable(struct gl_context *ctx, const char *func)
{
   _mesa_glthread_finish_before(ctx, func);
   _mesa_glthread_restore_dispatch(ctx, func);
}

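/* Submits the current batch to the worker thread if it contains any commands.
 * Every so often this also re-pins the worker thread to the L3 cache (CCX)
 * that the application thread is currently running on.
 */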
void
_mesa_glthread_flush_batch(struct gl_context *ctx)
{
   struct glthread_state *glthread = &ctx->GLThread;
   if (!glthread->enabled)
      return;

   struct glthread_batch *next = glthread->next_batch;
   if (!next->used)
      return;

   /* Pin threads regularly to the same Zen CCX that the main thread is
    * running on. The main thread can move between CCXs.
    */
   if (util_cpu_caps.nr_cpus != util_cpu_caps.cores_per_L3 &&
       /* driver support */
       ctx->Driver.PinDriverToL3Cache &&
       ++glthread->pin_thread_counter % 128 == 0) {
      int cpu = util_get_current_cpu();

      if (cpu >= 0) {
         unsigned L3_cache = util_cpu_caps.cpu_to_L3[cpu];

         util_set_thread_affinity(glthread->queue.threads[0],
                                  util_cpu_caps.L3_affinity_mask[L3_cache],
                                  NULL, UTIL_MAX_CPUS);
         ctx->Driver.PinDriverToL3Cache(ctx, L3_cache);
      }
   }

   /* Debug: execute the batch immediately from this thread.
    *
    * Note that glthread_unmarshal_batch() changes the dispatch table so we'll
    * need to restore it when it returns.
    */
   if (false) {
      glthread_unmarshal_batch(next, 0);
      _glapi_set_dispatch(ctx->CurrentClientDispatch);
      return;
   }

   p_atomic_add(&glthread->stats.num_offloaded_items, next->used);

   util_queue_add_job(&glthread->queue, next, &next->fence,
                      glthread_unmarshal_batch, NULL, 0);
   glthread->last = glthread->next;
   glthread->next = (glthread->next + 1) % MARSHAL_MAX_BATCHES;
   glthread->next_batch = &glthread->batches[glthread->next];
}

/**
 * Waits until all pending batches have been unmarshaled.
 *
 * This can be used by the main thread to synchronize access to the context,
 * since the worker thread will be idle after this.
 */
void
_mesa_glthread_finish(struct gl_context *ctx)
{
   struct glthread_state *glthread = &ctx->GLThread;
   if (!glthread->enabled)
      return;

   /* If this is called from the worker thread, then we've hit a path that
    * might be called from either the main thread or the worker (such as some
    * DRI interface entrypoints), in which case we don't need to actually
    * synchronize against ourselves.
    */
   if (u_thread_is_self(glthread->queue.threads[0]))
      return;

   struct glthread_batch *last = &glthread->batches[glthread->last];
   struct glthread_batch *next = glthread->next_batch;
   bool synced = false;

   if (!util_queue_fence_is_signalled(&last->fence)) {
      util_queue_fence_wait(&last->fence);
      synced = true;
   }

   if (next->used) {
      p_atomic_add(&glthread->stats.num_direct_items, next->used);

      /* Since glthread_unmarshal_batch changes the dispatch to direct,
       * restore it after it's done.
       */
      struct _glapi_table *dispatch = _glapi_get_dispatch();
      glthread_unmarshal_batch(next, 0);
      _glapi_set_dispatch(dispatch);

      /* It's not a sync because we don't enqueue partial batches, but
       * it would be a sync if we did. So count it anyway.
       */
      synced = true;
   }

   if (synced)
      p_atomic_inc(&glthread->stats.num_syncs);
}

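/* Like _mesa_glthread_finish(), but also identifies the GL call that forced
 * the synchronization (see the commented-out debug printf below).
 */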
void
_mesa_glthread_finish_before(struct gl_context *ctx, const char *func)
{
   _mesa_glthread_finish(ctx);

   /* Uncomment this if you want to know where glthread syncs. */
   /*printf("fallback to sync: %s\n", func);*/
}