/**************************************************************************
 *
 * Copyright 2017 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/* This is a wrapper for pipe_context that executes all pipe_context calls
 * in another thread.
 *
 *
 * Guidelines for adopters and deviations from Gallium
 * ---------------------------------------------------
 *
 * 1) pipe_context is wrapped. pipe_screen isn't wrapped. All pipe_screen
 *    driver functions that take a context (fence_finish, texture_get_handle)
 *    should manually unwrap pipe_context by doing:
 *      pipe = threaded_context_unwrap_sync(pipe);
 *
 *    pipe_context::priv is used to unwrap the context, so drivers and state
 *    trackers shouldn't use it.
 *
 *    No other objects are wrapped.
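 *
 *    For example, a driver's fence_finish might begin like this (a sketch;
 *    the "xyz" driver name is illustrative):
 *
 *      static bool xyz_fence_finish(struct pipe_screen *screen,
 *                                   struct pipe_context *ctx,
 *                                   struct pipe_fence_handle *fence,
 *                                   uint64_t timeout)
 *      {
 *         if (ctx)
 *            ctx = threaded_context_unwrap_sync(ctx);
 *         ...
 *      }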
 *
 * 2) Drivers must subclass and initialize these structures:
 *    - threaded_resource for pipe_resource (use threaded_resource_init/deinit)
 *    - threaded_query for pipe_query (zero memory)
 *    - threaded_transfer for pipe_transfer (zero memory)
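 *
 *    For example (a sketch; the struct name is illustrative):
 *
 *      struct xyz_resource {
 *         struct threaded_resource b; // must be the first member
 *         ...driver-private fields...
 *      };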
 *
 * 3) The threaded context must not be enabled for contexts that can use video
 *    codecs.
 *
 * 4) Changes in driver behavior:
 *    - begin_query and end_query always return true; return values from
 *      the driver are ignored.
 *    - generate_mipmap uses is_format_supported to determine success;
 *      the return value from the driver is ignored.
 *    - resource_commit always returns true; failures are ignored.
 *    - set_debug_callback is skipped if the callback is synchronous.
 *
 *
 * Thread-safety requirements on context functions
 * -----------------------------------------------
 *
 * These pipe_context functions are executed directly, so they shouldn't use
 * pipe_context in an unsafe way. They are de-facto screen functions now:
 * - create_query
 * - create_batch_query
 * - create_*_state (all CSOs and shaders)
 *     - Make sure the shader compiler doesn't use any per-context stuff.
 *       (e.g. LLVM target machine)
 *     - Only pipe_context's debug callback for shader dumps is guaranteed to
 *       be up to date, because set_debug_callback synchronizes execution.
 * - create_surface
 * - surface_destroy
 * - create_sampler_view
 * - sampler_view_destroy
 * - stream_output_target_destroy
 * - transfer_map (only unsynchronized buffer mappings)
 * - get_query_result (when threaded_query::flushed == true)
 * - create_stream_output_target
 * - get_sample_position
 *
 *
 * Transfer_map rules for buffer mappings
 * --------------------------------------
 *
 * 1) If transfer_map has PIPE_MAP_UNSYNCHRONIZED, the call is made
 *    in the non-driver thread without flushing the queue. The driver will
 *    receive TC_TRANSFER_MAP_THREADED_UNSYNC in addition to PIPE_MAP_-
 *    UNSYNCHRONIZED to indicate this.
 *    Note that transfer_unmap is always enqueued and called from the driver
 *    thread.
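 *
 *    Sketch of how a driver's buffer mapping path might react to the flag:
 *
 *      if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC) {
 *         // called from the frontend thread; use the context only in
 *         // thread-safe ways on this path
 *      }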
 *
 * 2) The driver isn't allowed to infer unsynchronized mappings by tracking
 *    the valid buffer range. The threaded context always sends TC_TRANSFER_-
 *    MAP_NO_INFER_UNSYNCHRONIZED to indicate this. Ignoring the flag will lead
 *    to failures.
 *    The threaded context does its own detection of unsynchronized mappings.
 *
 * 3) The driver isn't allowed to do buffer invalidations by itself under any
 *    circumstances. This is necessary for unsynchronized maps to map the latest
 *    version of the buffer. (because invalidations can be queued, while
 *    unsynchronized maps are not queued and they should return the latest
 *    storage after invalidation). The threaded context always sends
 *    TC_TRANSFER_MAP_NO_INVALIDATE into transfer_map and buffer_subdata to
 *    indicate this. Ignoring the flag will lead to failures.
 *    The threaded context uses its own buffer invalidation mechanism.
 *    Do NOT use pipe_buffer_write, as this may trigger invalidation;
 *    use tc_buffer_write instead.
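 *
 *    For example:
 *
 *      // instead of: pipe_buffer_write(pipe, buf, offset, size, data);
 *      tc_buffer_write(pipe, buf, offset, size, data);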
 *
 * 4) PIPE_MAP_ONCE can no longer be used to infer that a buffer will not be mapped
 *    a second time before it is unmapped.
 *
 *
 * Rules for fences
 * ----------------
 *
 * Flushes will be executed asynchronously in the driver thread if a
 * create_fence callback is provided. This affects fence semantics as follows.
 *
 * When the threaded context wants to perform an asynchronous flush, it will
 * use the create_fence callback to pre-create the fence from the calling
 * thread. This pre-created fence will be passed to pipe_context::flush
 * together with the TC_FLUSH_ASYNC flag.
 *
 * The callback receives the unwrapped context as a parameter, but must use it
 * in a thread-safe way because it is called from a non-driver thread.
 *
 * If the threaded_context does not immediately flush the current batch, the
 * callback also receives a tc_unflushed_batch_token. If fence_finish is called
 * on the returned fence in the context that created the fence,
 * threaded_context_flush must be called.
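 *
 *    Sketch of that fence_finish path (the fence wrapper and its tc_token
 *    field are illustrative):
 *
 *      if (fence->tc_token) {
 *         threaded_context_flush(ctx, fence->tc_token, timeout == 0);
 *      }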
 *
 * The driver must implement pipe_context::fence_server_sync properly, since
 * the threaded context handles PIPE_FLUSH_ASYNC.
 *
 *
 * Additional requirements
 * -----------------------
 *
 * get_query_result:
 *    If threaded_query::flushed == true, get_query_result should assume that
 *    it's called from a non-driver thread, in which case the driver shouldn't
 *    use the context in an unsafe way.
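 *
 *    Sketch:
 *
 *      struct threaded_query *tq = threaded_query(query);
 *      if (tq->flushed) {
 *         // possibly called from the frontend thread; don't touch
 *         // mutable per-context state here
 *      }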
 *
 * replace_buffer_storage:
 *    The driver has to implement this callback, which will be called when
 *    the threaded context wants to replace a resource's backing storage with
 *    another resource's backing storage. The threaded context uses it to
 *    implement buffer invalidation. This call is always queued.
 *    Note that 'minimum_num_rebinds' specifies only the minimum number of rebinds
 *    which must be managed by the driver; if a buffer is bound multiple times in
 *    the same binding point (e.g., vertex buffer slots 0,1,2), this will be counted
 *    as a single rebind.
 *
 *
 * Optional resource busy callbacks for better performance
 * -------------------------------------------------------
 *
 * This adds checking whether a resource is used by the GPU and whether
 * a resource is referenced by an unflushed command buffer. If neither is true,
 * the threaded context will map the buffer as UNSYNCHRONIZED without flushing
 * or synchronizing the thread and will skip any buffer invalidations
 * (reallocations) because invalidating an idle buffer has no benefit.
 *
 * There is one driver callback and one TC callback:
 *
 * 1) is_resource_busy: It returns true when a resource is busy. If this is NULL,
 *    the resource is considered always busy.
 *
 * 2) tc_driver_internal_flush_notify: If the driver sets
 *    driver_calls_flush_notify = true in threaded_context_create, it should
 *    call this after every internal driver flush. The threaded context uses it
 *    to track internal driver flushes for the purpose of tracking which
 *    buffers are referenced by an unflushed command buffer.
 *
 * If is_resource_busy is set, threaded_resource::buffer_id_unique must be
 * generated by the driver, and the replace_buffer_storage callback should
 * delete the buffer ID passed to it. The driver should use
 * util_idalloc_mt_init_tc.
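 *
 *    Sketch (the screen's buffer_ids allocator field is illustrative):
 *
 *      // at screen creation:
 *      util_idalloc_mt_init_tc(&screen->buffer_ids);
 *      // at buffer creation:
 *      tres->buffer_id_unique = util_idalloc_mt_alloc(&screen->buffer_ids);
 *      // in replace_buffer_storage:
 *      util_idalloc_mt_free(&screen->buffer_ids, delete_buffer_id);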
 *
 *
 * How it works (queue architecture)
 * ---------------------------------
 *
 * There is a multithreaded queue consisting of batches, each batch containing
 * 8-byte slots. Calls can occupy 1 or more slots.
 *
 * Once a batch is full and there is no space for the next call, it's flushed,
 * meaning that it's added to the queue for execution in the other thread.
 * The batches are ordered in a ring and reused once they are idle again.
 * The batching is necessary for low queue/mutex overhead.
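 *
 *
 * Creation usage sketch
 * ---------------------
 *
 * A minimal sketch of wrapping a freshly created driver context (names with
 * "xyz" are illustrative):
 *
 *    struct threaded_context_options options = {0};
 *    struct threaded_context *tc;
 *    struct pipe_context *pipe = xyz_create_context_raw(screen, flags);
 *
 *    pipe = threaded_context_create(pipe, &xyz_screen->transfer_pool,
 *                                   xyz_replace_buffer_storage,
 *                                   &options, &tc);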
 */

#ifndef U_THREADED_CONTEXT_H
#define U_THREADED_CONTEXT_H

#include "c11/threads.h"
#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "util/bitset.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_queue.h"
#include "util/u_range.h"
#include "util/u_thread.h"
#include "util/slab.h"
#include "util/u_dynarray.h"

#ifdef __cplusplus
extern "C" {
#endif

struct threaded_context;
struct tc_unflushed_batch_token;

/* 0 = disabled, 1 = assertions, 2 = printfs, 3 = logging */
#define TC_DEBUG 0

/* This is an internal flag not sent to the driver. */
#define TC_TRANSFER_MAP_UPLOAD_CPU_STORAGE   (1u << 28)
/* These are map flags sent to drivers. */
/* Never infer whether it's safe to use unsynchronized mappings: */
#define TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED (1u << 29)
/* Don't invalidate buffers: */
#define TC_TRANSFER_MAP_NO_INVALIDATE        (1u << 30)
/* transfer_map is called from a non-driver thread: */
#define TC_TRANSFER_MAP_THREADED_UNSYNC      (1u << 31)

/* Custom flush flags sent to drivers. */
/* fence is pre-populated with a fence created by the create_fence callback */
#define TC_FLUSH_ASYNC        (1u << 31)

/* Size of the queue = number of batch slots in memory.
 * - 1 batch is always idle and records new commands
 * - 1 batch is being executed
 * so the queue size is TC_MAX_BATCHES - 2 = number of waiting batches.
 *
 * Use a size as small as possible for low CPU L2 cache usage but large enough
 * so that the queue isn't stalled too often for not having enough idle batch
 * slots.
 */
#define TC_MAX_BATCHES        10

/* The size of one batch. Non-trivial calls (i.e. not setting a CSO pointer)
 * can occupy multiple call slots.
 *
 * The idea is to have batches as small as possible but large enough so that
 * the queuing and mutex overhead is negligible.
 */
#define TC_SLOTS_PER_BATCH    1536

/* The buffer list queue is much deeper than the batch queue because buffer
 * lists need to stay around until the driver internally flushes its command
 * buffer.
 */
#define TC_MAX_BUFFER_LISTS   (TC_MAX_BATCHES * 4)

/* This mask is used to get a hash of a buffer ID. It's also the bit size of
 * the buffer list - 1. It must be 2^n - 1. The size should be as low as
 * possible to minimize memory usage, but high enough to minimize hash
 * collisions.
 */
#define TC_BUFFER_ID_MASK      BITFIELD_MASK(14)

/* Threshold for when to use the queue or sync. */
#define TC_MAX_STRING_MARKER_BYTES  512

/* Threshold for when to enqueue buffer/texture_subdata as-is.
 * If the upload size is greater than this, it will do instead:
 * - for buffers: DISCARD_RANGE is done by the threaded context
 * - for textures: sync and call the driver directly
 */
#define TC_MAX_SUBDATA_BYTES        320

enum tc_call_id {
#define CALL(name) TC_CALL_##name,
#include "u_threaded_context_calls.h"
#undef CALL
   TC_NUM_CALLS,
};

enum tc_binding_type {
   TC_BINDING_VERTEX_BUFFER,
   TC_BINDING_STREAMOUT_BUFFER,
   TC_BINDING_UBO_VS,
   TC_BINDING_UBO_FS,
   TC_BINDING_UBO_GS,
   TC_BINDING_UBO_TCS,
   TC_BINDING_UBO_TES,
   TC_BINDING_UBO_CS,
   TC_BINDING_SAMPLERVIEW_VS,
   TC_BINDING_SAMPLERVIEW_FS,
   TC_BINDING_SAMPLERVIEW_GS,
   TC_BINDING_SAMPLERVIEW_TCS,
   TC_BINDING_SAMPLERVIEW_TES,
   TC_BINDING_SAMPLERVIEW_CS,
   TC_BINDING_SSBO_VS,
   TC_BINDING_SSBO_FS,
   TC_BINDING_SSBO_GS,
   TC_BINDING_SSBO_TCS,
   TC_BINDING_SSBO_TES,
   TC_BINDING_SSBO_CS,
   TC_BINDING_IMAGE_VS,
   TC_BINDING_IMAGE_FS,
   TC_BINDING_IMAGE_GS,
   TC_BINDING_IMAGE_TCS,
   TC_BINDING_IMAGE_TES,
   TC_BINDING_IMAGE_CS,
};

typedef uint16_t (*tc_execute)(struct pipe_context *pipe, void *call);

typedef void (*tc_replace_buffer_storage_func)(struct pipe_context *ctx,
                                               struct pipe_resource *dst,
                                               struct pipe_resource *src,
                                               unsigned minimum_num_rebinds,
                                               uint32_t rebind_mask,
                                               uint32_t delete_buffer_id);
typedef struct pipe_fence_handle *(*tc_create_fence_func)(struct pipe_context *ctx,
                                                          struct tc_unflushed_batch_token *token);
typedef bool (*tc_is_resource_busy)(struct pipe_screen *screen,
                                    struct pipe_resource *resource,
                                    unsigned usage);

struct threaded_resource {
   struct pipe_resource b;

   /* Since buffer invalidations are queued, we can't use the base resource
    * for unsynchronized mappings. This points to the latest version of
    * the buffer after the latest invalidation. It's only used for unsynchro-
    * nized mappings in the non-driver thread. Initially it's set to &b.
    */
   struct pipe_resource *latest;

   /* Optional CPU storage of the buffer. When we get partial glBufferSubData
    * (implemented by copy_buffer) + glDrawElements, we don't want to drain
    * the gfx pipeline before executing the copy. For ideal pipelining, we
    * upload to this CPU storage and then reallocate the GPU storage
    * completely and reupload everything without copy_buffer.
    */
   void *cpu_storage;

   /* The buffer range which is initialized (with a write transfer, streamout,
    * or writable shader resources). The remainder of the buffer is considered
    * invalid and can be mapped unsynchronized.
    *
    * This allows unsynchronized mapping of a buffer range which hasn't been
    * used yet. It's for applications which forget to use the unsynchronized
    * map flag and expect the driver to figure it out.
    *
    * Drivers should set this to the full range for buffers backed by user
    * memory.
    */
   struct util_range valid_buffer_range;

   /* Drivers are required to update this for shared resources and user
    * pointers. */
   bool is_shared;
   bool is_user_ptr;
   bool allow_cpu_storage;

   /* internal tag for tc indicating which batch last touched this resource */
   int8_t last_batch_usage;
   /* for disambiguating last_batch_usage across batch cycles */
   uint32_t batch_generation;

   /* Unique buffer ID. Drivers must set it to non-zero for buffers and it must
    * be unique. Textures must set it to 0. Low bits are used as a hash of the
    * ID. Use util_idalloc_mt to generate these IDs.
    */
   uint32_t buffer_id_unique;

   /* If positive, then a staging transfer is in progress.
    */
   int pending_staging_uploads;

   /* If staging uploads are pending, this will hold the union of the mapped
    * ranges.
    */
   struct util_range pending_staging_uploads_range;
};

struct threaded_transfer {
   struct pipe_transfer b;

   /* Staging buffer for DISCARD_RANGE transfers. */
   struct pipe_resource *staging;

   /* If b.resource is not the base instance of the buffer, but it's one of its
    * reallocations (set in "latest" of the base instance), this points to
    * the valid range of the base instance. It's used for transfers after
    * a buffer invalidation, because such transfers operate on "latest", not
    * the base instance. Initially it's set to &b.resource->valid_buffer_range.
    */
   struct util_range *valid_buffer_range;

   bool cpu_storage_mapped;
};

struct threaded_query {
   /* The query is added to the list in end_query and removed in flush. */
   struct list_head head_unflushed;

   /* Whether pipe->flush has been called in non-deferred mode after end_query. */
   bool flushed;
};

struct tc_call_base {
#if !defined(NDEBUG) && TC_DEBUG >= 1
   uint32_t sentinel;
#endif
   uint16_t num_slots;
   uint16_t call_id;
};

struct tc_draw_single {
   struct tc_call_base base;
   unsigned index_bias;
   struct pipe_draw_info info;
};

/**
 * A token representing an unflushed batch.
 *
 * See the general rules for fences for an explanation.
 */
struct tc_unflushed_batch_token {
   struct pipe_reference ref;
   struct threaded_context *tc;
};

struct tc_renderpass_info {
   union {
      struct {
         /* bitmask of full-cleared color buffers */
         uint8_t cbuf_clear;
         /* bitmask of not-full-cleared color buffers */
         uint8_t cbuf_load;
         /* bitmask of color buffers that have their stores invalidated */
         uint8_t cbuf_invalidate;
         /* whether the zsbuf is full-cleared */
         bool zsbuf_clear : 1;
         /* whether the zsbuf is partial-cleared */
         bool zsbuf_clear_partial : 1;
         /* whether the zsbuf is not-full-cleared */
         bool zsbuf_load : 1;
         /* whether the zsbuf is invalidated */
         bool zsbuf_invalidate : 1;
         /* whether a draw occurs */
         bool has_draw : 1;
         /* whether a framebuffer resolve occurs on cbuf[0] */
         bool has_resolve : 1;
         /* whether queries are ended during this renderpass */
         bool has_query_ends : 1;
         uint8_t pad : 1;
         /* 32 bits offset */
         /* bitmask of color buffers using fbfetch */
         uint8_t cbuf_fbfetch;
         /* whether the fragment shader writes to the zsbuf */
         bool zsbuf_write_fs : 1;
         /* whether the DSA state writes to the zsbuf */
         bool zsbuf_write_dsa : 1;
         /* whether the DSA state reads the zsbuf */
         bool zsbuf_read_dsa : 1;
         /* whether the zsbuf is used for fbfetch */
         bool zsbuf_fbfetch : 1;
         uint8_t pad2 : 4;
         uint16_t pad3;
      };
      uint64_t data;
      /* fb info is in data32[0] */
      uint32_t data32[2];
      /* cso info is in data16[2] */
      uint16_t data16[4];
      /* zsbuf fb info is in data8[3] */
      uint8_t data8[8];
   };
};

static inline bool
tc_renderpass_info_is_zsbuf_used(const struct tc_renderpass_info *info)
{
   return info->zsbuf_clear ||
          info->zsbuf_clear_partial ||
          info->zsbuf_write_fs ||
          info->zsbuf_write_dsa ||
          info->zsbuf_read_dsa ||
          info->zsbuf_fbfetch;
}

/* if a driver ends a renderpass early for some reason,
 * this function can be called to reset any stored renderpass info
 * to a "safe" state that will avoid data loss on framebuffer attachments
 *
 * note: ending a renderpass early if invalidate hints are applied will
 * result in data loss
 */
static inline void
tc_renderpass_info_reset(struct tc_renderpass_info *info)
{
   info->data32[0] = 0;
   info->cbuf_load = BITFIELD_MASK(8);
   info->zsbuf_clear_partial = true;
   info->has_draw = true;
   info->has_query_ends = true;
}

struct tc_batch {
   struct threaded_context *tc;
#if !defined(NDEBUG) && TC_DEBUG >= 1
   unsigned sentinel;
#endif
   uint16_t num_total_slots;
   uint16_t buffer_list_index;
   /* the index of the current renderpass info for recording */
   int16_t renderpass_info_idx;
   uint16_t max_renderpass_info_idx;

   /* The last mergeable call that was added to this batch (i.e.
    * buffer subdata). This might be out-of-date or NULL.
    */
   struct tc_call_base *last_mergeable_call;

   struct util_queue_fence fence;
   /* whether the first set_framebuffer_state call has been seen by this batch */
   bool first_set_fb;
   uint8_t batch_idx;
   struct tc_unflushed_batch_token *token;
   uint64_t slots[TC_SLOTS_PER_BATCH];
   struct util_dynarray renderpass_infos;
};

struct tc_buffer_list {
   /* Signalled by the driver after it flushes its internal command buffer. */
   struct util_queue_fence driver_flushed_fence;

   /* Buffer list where bit N means whether ID hash N is in the list. */
   BITSET_DECLARE(buffer_list, TC_BUFFER_ID_MASK + 1);
};

/**
 * Optional TC parameters/callbacks.
 */
struct threaded_context_options {
   tc_create_fence_func create_fence;
   tc_is_resource_busy is_resource_busy;
   bool driver_calls_flush_notify;

   /**
    * If true, ctx->get_device_reset_status() will be called without
    * synchronizing with driver thread.  Drivers can enable this to avoid
    * TC syncs if their implementation of get_device_reset_status() is
    * safe to call without synchronizing with driver thread.
    */
   bool unsynchronized_get_device_reset_status;

   /* If true, create_fence_fd doesn't access the context in the driver. */
   bool unsynchronized_create_fence_fd;
   /* if true, texture_subdata calls may occur unsynchronized with PIPE_MAP_UNSYNCHRONIZED */
   bool unsynchronized_texture_subdata;
   /* if true, parse and track renderpass info during execution */
   bool parse_renderpass_info;
   /* callbacks for drivers to read their DSA/FS state and update renderpass info accordingly
    * note: drivers must ONLY append to renderpass info using |=
    */
   void (*dsa_parse)(void *state, struct tc_renderpass_info *info);
   void (*fs_parse)(void *state, struct tc_renderpass_info *info);
};

struct tc_vertex_buffers {
   struct tc_call_base base;
   uint8_t count;
   struct pipe_vertex_buffer slot[0]; /* more will be allocated if needed */
};

struct threaded_context {
   struct pipe_context base;
   struct pipe_context *pipe;
   struct slab_child_pool pool_transfers;
   tc_replace_buffer_storage_func replace_buffer_storage;
   struct threaded_context_options options;
   unsigned map_buffer_alignment;
   unsigned ubo_alignment;

   struct list_head unflushed_queries;

   /* Counters for the HUD. */
   unsigned num_offloaded_slots;
   unsigned num_direct_slots;
   unsigned num_syncs;

   bool use_forced_staging_uploads;
   bool add_all_gfx_bindings_to_buffer_list;
   bool add_all_compute_bindings_to_buffer_list;
   uint8_t num_queries_active;

   /* Estimation of how much vram/gtt bytes are mmap'd in
    * the current tc_batch.
    */
   uint64_t bytes_mapped_estimate;
   uint64_t bytes_mapped_limit;

   struct util_queue queue;
   struct util_queue_fence *fence;

#ifndef NDEBUG
   /**
    * The driver thread is normally the queue thread, but
    * there are cases where the queue is flushed directly
    * from the frontend thread
    */
   thrd_t driver_thread;
#endif

   bool seen_tcs;
   bool seen_tes;
   bool seen_gs;
   /* whether the current renderpass has seen a set_framebuffer_state call */
   bool seen_fb_state;
   /* whether a renderpass is currently active */
   bool in_renderpass;
   /* whether a query has ended more recently than a draw */
   bool query_ended;
   /* whether pipe_context::flush has been called */
   bool flushing;

   bool seen_streamout_buffers;
   bool seen_shader_buffers[PIPE_SHADER_TYPES];
   bool seen_image_buffers[PIPE_SHADER_TYPES];
   bool seen_sampler_buffers[PIPE_SHADER_TYPES];

   int8_t last_completed;

   uint8_t num_vertex_buffers;
   unsigned max_const_buffers;
   unsigned max_shader_buffers;
   unsigned max_images;
   unsigned max_samplers;
   unsigned nr_cbufs;

   unsigned last, next, next_buf_list, batch_generation;

   /* The list of fences that the driver should signal after the next flush.
    * If this is empty, all driver command buffers have been flushed.
    */
   struct util_queue_fence *signal_fences_next_flush[TC_MAX_BUFFER_LISTS];
   unsigned num_signal_fences_next_flush;

   /* Bound buffers are tracked here using threaded_resource::buffer_id_hash.
    * 0 means unbound.
    */
   uint32_t vertex_buffers[PIPE_MAX_ATTRIBS];
   uint32_t streamout_buffers[PIPE_MAX_SO_BUFFERS];
   uint32_t const_buffers[PIPE_SHADER_TYPES][PIPE_MAX_CONSTANT_BUFFERS];
   uint32_t shader_buffers[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_BUFFERS];
   uint32_t image_buffers[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_IMAGES];
   uint32_t shader_buffers_writeable_mask[PIPE_SHADER_TYPES];
   uint64_t image_buffers_writeable_mask[PIPE_SHADER_TYPES];
   uint32_t sampler_buffers[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_SAMPLER_VIEWS];

   struct tc_batch batch_slots[TC_MAX_BATCHES];
   struct tc_buffer_list buffer_lists[TC_MAX_BUFFER_LISTS];
   /* the current framebuffer attachments; [PIPE_MAX_COLOR_BUFS] is the zsbuf */
   struct pipe_resource *fb_resources[PIPE_MAX_COLOR_BUFS + 1];
   struct pipe_resource *fb_resolve;
   /* accessed by main thread; preserves info across batches */
   struct tc_renderpass_info *renderpass_info_recording;
   /* accessed by driver thread */
   struct tc_renderpass_info *renderpass_info;

   /* Callbacks that call pipe_context functions. */
   tc_execute execute_func[TC_NUM_CALLS];
};


void threaded_resource_init(struct pipe_resource *res, bool allow_cpu_storage);
void threaded_resource_deinit(struct pipe_resource *res);
struct pipe_context *threaded_context_unwrap_sync(struct pipe_context *pipe);
void tc_driver_internal_flush_notify(struct threaded_context *tc);

/** function for getting the current renderpass info:
 * - renderpass info is always non-null
 *
 * Rules:
 * - threaded context must have been created with parse_renderpass_info=true
 * - must be called after the driver receives a pipe_context::set_framebuffer_state callback
 * - must be called after the driver receives a non-deferrable pipe_context::flush callback
 * - renderpass info must not be used during any internal driver operations (e.g., u_blitter)
 * - must not be called before the driver receives its first pipe_context::set_framebuffer_state callback
 * - renderpass info is invalidated only for non-deferrable flushes and new framebuffer states
 */
const struct tc_renderpass_info *
threaded_context_get_renderpass_info(struct threaded_context *tc);
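
/* Usage sketch (assumes parse_renderpass_info=true; "xyz_ctx" and its "tc"
 * field are illustrative):
 *
 *    const struct tc_renderpass_info *info =
 *       threaded_context_get_renderpass_info(xyz_ctx->tc);
 *
 *    if (info->cbuf_invalidate & BITFIELD_BIT(i))
 *       ; // the store for color attachment i can be skipped
 */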

struct pipe_context *
threaded_context_create(struct pipe_context *pipe,
                        struct slab_parent_pool *parent_transfer_pool,
                        tc_replace_buffer_storage_func replace_buffer,
                        const struct threaded_context_options *options,
                        struct threaded_context **out);

void
threaded_context_init_bytes_mapped_limit(struct threaded_context *tc, unsigned divisor);

void
threaded_context_flush(struct pipe_context *_pipe,
                       struct tc_unflushed_batch_token *token,
                       bool prefer_async);

struct tc_draw_single *
tc_add_draw_single_call(struct pipe_context *_pipe,
                        struct pipe_resource *index_bo);
struct pipe_vertex_buffer *
tc_add_set_vertex_buffers_call(struct pipe_context *_pipe, unsigned count);

void
tc_draw_vbo(struct pipe_context *_pipe, const struct pipe_draw_info *info,
            unsigned drawid_offset,
            const struct pipe_draw_indirect_info *indirect,
            const struct pipe_draw_start_count_bias *draws,
            unsigned num_draws);

static inline struct threaded_context *
threaded_context(struct pipe_context *pipe)
{
   return (struct threaded_context*)pipe;
}

static inline struct threaded_resource *
threaded_resource(struct pipe_resource *res)
{
   return (struct threaded_resource*)res;
}

static inline struct threaded_query *
threaded_query(struct pipe_query *q)
{
   return (struct threaded_query*)q;
}

static inline struct threaded_transfer *
threaded_transfer(struct pipe_transfer *transfer)
{
   return (struct threaded_transfer*)transfer;
}

static inline void
tc_unflushed_batch_token_reference(struct tc_unflushed_batch_token **dst,
                                   struct tc_unflushed_batch_token *src)
{
   if (pipe_reference((struct pipe_reference *)*dst, (struct pipe_reference *)src))
      free(*dst);
   *dst = src;
}

/**
 * Helper for !NDEBUG builds to assert that it is called from driver
 * thread.  This is to help drivers ensure that various code-paths
 * are not hit indirectly from pipe entry points that are called from
 * front-end/state-tracker thread.
 */
static inline void
tc_assert_driver_thread(struct threaded_context *tc)
{
   if (!tc)
      return;
#ifndef NDEBUG
   assert(u_thread_is_self(tc->driver_thread));
#endif
}

/**
 * This is called before GPU stores to disable the CPU storage because
 * the CPU storage doesn't mirror the GPU storage.
 *
 * Drivers should also call it before exporting a DMABUF of a buffer.
 */
static inline void
tc_buffer_disable_cpu_storage(struct pipe_resource *buf)
{
   struct threaded_resource *tres = threaded_resource(buf);

   if (tres->cpu_storage) {
      align_free(tres->cpu_storage);
      tres->cpu_storage = NULL;
   }
   tres->allow_cpu_storage = false;
}

static inline void
tc_buffer_write(struct pipe_context *pipe,
                struct pipe_resource *buf,
                unsigned offset,
                unsigned size,
                const void *data)
{
   pipe->buffer_subdata(pipe, buf, PIPE_MAP_WRITE | TC_TRANSFER_MAP_NO_INVALIDATE, offset, size, data);
}

static inline struct tc_buffer_list *
tc_get_next_buffer_list(struct pipe_context *_pipe)
{
   struct threaded_context *tc = threaded_context(_pipe);

   return &tc->buffer_lists[tc->next_buf_list];
}

/* Set a buffer binding and add it to the buffer list. */
static inline void
tc_bind_buffer(uint32_t *binding, struct tc_buffer_list *next, struct pipe_resource *buf)
{
   uint32_t id = threaded_resource(buf)->buffer_id_unique;
   *binding = id;
   BITSET_SET(next->buffer_list, id & TC_BUFFER_ID_MASK);
}

/* Reset a buffer binding. */
static inline void
tc_unbind_buffer(uint32_t *binding)
{
   *binding = 0;
}

static inline void
tc_track_vertex_buffer(struct pipe_context *_pipe, unsigned index,
                       struct pipe_resource *buf,
                       struct tc_buffer_list *next_buffer_list)
{
   struct threaded_context *tc = threaded_context(_pipe);

   if (buf) {
      tc_bind_buffer(&tc->vertex_buffers[index], next_buffer_list, buf);
   } else {
      tc_unbind_buffer(&tc->vertex_buffers[index]);
   }
}

#ifdef __cplusplus
}
#endif

#endif