/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_pipe_control.c
 *
 * PIPE_CONTROL is the main flushing and synchronization primitive on Intel
 * GPUs.  It can invalidate caches, stall until rendering reaches various
 * stages of completion, write to memory, and more.  In a way, it's
 * a Swiss Army knife command - it has all kinds of capabilities, but some
 * significant limitations as well.
 *
 * Unfortunately, it's notoriously complicated and difficult to use.  Many
 * sub-commands can't be used together.  Some are meant to be used at the
 * top of the pipeline (invalidating caches before drawing), while some are
 * meant to be used at the end (stalling or flushing after drawing).
 *
 * Also, there's a list of restrictions a mile long, which vary by generation.
 * Do this before doing that, or suffer the consequences (usually a GPU hang).
 *
 * This file contains helpers for emitting PIPE_CONTROL commands safely.
 * You can simply call iris_emit_pipe_control_flush() with the desired
 * operations (as logical PIPE_CONTROL_* bits), and it will take care of
 * splitting the request into multiple PIPE_CONTROL commands as necessary.
 * The per-generation workarounds are applied in
 * iris_emit_raw_pipe_control() in iris_state.c.
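 *
 * For example (illustrative only, not part of this file's API contract):
 * a caller that has just rendered to a texture and is about to sample from
 * it could request both a flush and an invalidation in one logical call:
 *
 *    iris_emit_pipe_control_flush(batch, "example: render-to-texture",
 *                                 PIPE_CONTROL_RENDER_TARGET_FLUSH |
 *                                 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
 *
 * Because a flush bit and an invalidate bit are both set, the helper splits
 * this into an end-of-pipe sync followed by a separate invalidating
 * PIPE_CONTROL (see iris_emit_pipe_control_flush() below).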
 */

#include "iris_context.h"
#include "util/hash_table.h"
#include "util/set.h"

/**
 * Emit a PIPE_CONTROL with various flushing flags.
 *
 * The caller is responsible for deciding what flags are appropriate for the
 * given generation.
 */
void
iris_emit_pipe_control_flush(struct iris_batch *batch,
                             const char *reason,
                             uint32_t flags)
{
   if ((flags & PIPE_CONTROL_CACHE_FLUSH_BITS) &&
       (flags & PIPE_CONTROL_CACHE_INVALIDATE_BITS)) {
      /* A pipe control command with flush and invalidate bits set
       * simultaneously is an inherently racy operation on Gfx6+ if the
       * contents of the flushed caches were intended to become visible from
       * any of the invalidated caches.  Split it into two PIPE_CONTROLs:
       * the first one should stall the pipeline to make sure that the
       * flushed R/W caches are coherent with memory once the specified R/O
       * caches are invalidated.  On pre-Gfx6 hardware the (implicit) R/O
       * cache invalidation seems to happen at the bottom of the pipeline
       * together with any write cache flush, so this shouldn't be a
       * concern.  In order to ensure a full stall, we do an end-of-pipe
       * sync.
       */
      iris_emit_end_of_pipe_sync(batch, reason,
                                 flags & PIPE_CONTROL_CACHE_FLUSH_BITS);
      flags &= ~(PIPE_CONTROL_CACHE_FLUSH_BITS | PIPE_CONTROL_CS_STALL);
   }

   batch->screen->vtbl.emit_raw_pipe_control(batch, reason, flags, NULL, 0, 0);
}

/**
 * Emit a PIPE_CONTROL that writes to a buffer object.
 *
 * \p flags should contain one of the following items:
 *  - PIPE_CONTROL_WRITE_IMMEDIATE
 *  - PIPE_CONTROL_WRITE_TIMESTAMP
 *  - PIPE_CONTROL_WRITE_DEPTH_COUNT
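 *
 * For example (illustrative only), recording a GPU timestamp into a
 * hypothetical query buffer might look like:
 *
 *    iris_emit_pipe_control_write(batch, "example: timestamp snapshot",
 *                                 PIPE_CONTROL_WRITE_TIMESTAMP,
 *                                 query_bo, snapshot_offset, 0);
 *
 * (query_bo and snapshot_offset are assumed names for this sketch; the
 * immediate value is unused for timestamp writes.)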
 */
void
iris_emit_pipe_control_write(struct iris_batch *batch,
                             const char *reason, uint32_t flags,
                             struct iris_bo *bo, uint32_t offset,
                             uint64_t imm)
{
   batch->screen->vtbl.emit_raw_pipe_control(batch, reason, flags, bo, offset, imm);
}

/*
 * From Sandybridge PRM, volume 2, "1.7.2 End-of-Pipe Synchronization":
 *
 *  Write synchronization is a special case of end-of-pipe
 *  synchronization that requires that the render cache and/or depth
 *  related caches are flushed to memory, where the data will become
 *  globally visible. This type of synchronization is required prior to
 *  SW (CPU) actually reading the result data from memory, or initiating
 *  an operation that will use as a read surface (such as a texture
 *  surface) a previous render target and/or depth/stencil buffer
 *
 * From Haswell PRM, volume 2, part 1, "End-of-Pipe Synchronization":
 *
 *  Exercising the write cache flush bits (Render Target Cache Flush
 *  Enable, Depth Cache Flush Enable, DC Flush) in PIPE_CONTROL only
 *  ensures the write caches are flushed and doesn't guarantee the data
 *  is globally visible.
 *
 *  SW can track the completion of the end-of-pipe-synchronization by
 *  using "Notify Enable" and "PostSync Operation - Write Immediate
 *  Data" in the PIPE_CONTROL command.
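 *
 * For example (illustrative only), making render target writes globally
 * visible before the CPU reads the buffer back could look like:
 *
 *    iris_emit_end_of_pipe_sync(batch, "example: CPU readback",
 *                               PIPE_CONTROL_RENDER_TARGET_FLUSH);
 *
 * The helper ORs in the CS stall and the write-immediate post-sync
 * operation itself, using the screen's workaround address as a scratch
 * write target.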
 */
void
iris_emit_end_of_pipe_sync(struct iris_batch *batch,
                           const char *reason, uint32_t flags)
{
   /* From Sandybridge PRM, volume 2, "1.7.3.1 Writing a Value to Memory":
    *
    *    "The most common action to perform upon reaching a synchronization
    *    point is to write a value out to memory. An immediate value
    *    (included with the synchronization command) may be written."
    *
    * From Broadwell PRM, volume 7, "End-of-Pipe Synchronization":
    *
    *    "In case the data flushed out by the render engine is to be read
    *    back in to the render engine in coherent manner, then the render
    *    engine has to wait for the fence completion before accessing the
    *    flushed data. This can be achieved by following means on various
    *    products: PIPE_CONTROL command with CS Stall and the required
    *    write caches flushed with Post-Sync-Operation as Write Immediate
    *    Data.
    *
    *    Example:
    *       - Workload-1 (3D/GPGPU/MEDIA)
    *       - PIPE_CONTROL (CS Stall, Post-Sync-Operation Write Immediate
    *         Data, Required Write Cache Flush bits set)
    *       - Workload-2 (Can use the data produce or output by Workload-1)
    */
   iris_emit_pipe_control_write(batch, reason,
                                flags | PIPE_CONTROL_CS_STALL |
                                PIPE_CONTROL_WRITE_IMMEDIATE,
                                batch->screen->workaround_address.bo,
                                batch->screen->workaround_address.offset, 0);
}

/**
 * Emits appropriate flushes and invalidations for any previous memory
 * operations on \p bo to be strictly ordered relative to any subsequent
 * memory operations performed from the caching domain \p access.
 *
 * This is useful because the GPU has separate incoherent caches for the
 * render target, sampler, etc., which need to be explicitly invalidated or
 * flushed in order to obtain the expected memory ordering in cases where the
 * same surface is accessed through multiple caches (e.g. due to
 * render-to-texture).
 *
 * This provides the expected memory ordering guarantees whether or not the
 * previous access was performed from the same batch or a different one, but
 * only the former case needs to be handled explicitly here, since the kernel
 * already inserts implicit flushes and synchronization in order to guarantee
 * that any data dependencies between batches are satisfied.
 *
 * Even though no flushing or invalidation is required in order to account
 * for concurrent updates from other batches, we provide the guarantee that a
 * required synchronization operation due to a previous batch-local update
 * will never be omitted due to the influence of another thread accessing the
 * same buffer concurrently from the same caching domain: Such a concurrent
 * update will only ever change the seqno of the last update to a value
 * greater than the local value (see iris_bo_bump_seqno()), which means that
 * we will always emit at least as much flushing and invalidation as we would
 * have for the local seqno (see the coherent_seqnos comparisons below).
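 *
 * For example (illustrative only): if \p bo was last written through the
 * render target cache (IRIS_DOMAIN_RENDER_WRITE) and is now about to be
 * read as a vertex buffer, a caller could issue
 *
 *    iris_emit_buffer_barrier_for(batch, bo, IRIS_DOMAIN_VF_READ);
 *
 * and the seqno comparisons below would typically select a render target
 * flush together with a VF cache invalidate.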
 */
void
iris_emit_buffer_barrier_for(struct iris_batch *batch,
                             struct iris_bo *bo,
                             enum iris_domain access)
{
   const struct intel_device_info *devinfo = &batch->screen->devinfo;
   const struct brw_compiler *compiler = batch->screen->compiler;

   const bool access_via_l3 = iris_domain_is_l3_coherent(devinfo, access);

   const uint32_t all_flush_bits = (PIPE_CONTROL_CACHE_FLUSH_BITS |
                                    PIPE_CONTROL_STALL_AT_SCOREBOARD |
                                    PIPE_CONTROL_FLUSH_ENABLE);
   const uint32_t flush_bits[NUM_IRIS_DOMAINS] = {
      [IRIS_DOMAIN_RENDER_WRITE] = PIPE_CONTROL_RENDER_TARGET_FLUSH,
      [IRIS_DOMAIN_DEPTH_WRITE] = PIPE_CONTROL_DEPTH_CACHE_FLUSH,
      [IRIS_DOMAIN_DATA_WRITE] = PIPE_CONTROL_FLUSH_HDC,
      /* OTHER_WRITE includes "VF Cache Invalidate" to make sure that any
       * stream output writes are finished.  CS stall is added implicitly.
       */
      [IRIS_DOMAIN_OTHER_WRITE] = PIPE_CONTROL_FLUSH_ENABLE | PIPE_CONTROL_VF_CACHE_INVALIDATE,
      [IRIS_DOMAIN_VF_READ] = PIPE_CONTROL_STALL_AT_SCOREBOARD,
      [IRIS_DOMAIN_SAMPLER_READ] = PIPE_CONTROL_STALL_AT_SCOREBOARD,
      [IRIS_DOMAIN_PULL_CONSTANT_READ] = PIPE_CONTROL_STALL_AT_SCOREBOARD,
      [IRIS_DOMAIN_OTHER_READ] = PIPE_CONTROL_STALL_AT_SCOREBOARD,
   };
   const uint32_t invalidate_bits[NUM_IRIS_DOMAINS] = {
      [IRIS_DOMAIN_RENDER_WRITE] = PIPE_CONTROL_RENDER_TARGET_FLUSH,
      [IRIS_DOMAIN_DEPTH_WRITE] = PIPE_CONTROL_DEPTH_CACHE_FLUSH,
      [IRIS_DOMAIN_DATA_WRITE] = PIPE_CONTROL_FLUSH_HDC,
      [IRIS_DOMAIN_OTHER_WRITE] = PIPE_CONTROL_FLUSH_ENABLE,
      [IRIS_DOMAIN_VF_READ] = PIPE_CONTROL_VF_CACHE_INVALIDATE,
      [IRIS_DOMAIN_SAMPLER_READ] = PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE,
      [IRIS_DOMAIN_PULL_CONSTANT_READ] = PIPE_CONTROL_CONST_CACHE_INVALIDATE |
         (compiler->indirect_ubos_use_sampler ?
          PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE :
          PIPE_CONTROL_DATA_CACHE_FLUSH),
   };
   const uint32_t l3_flush_bits[NUM_IRIS_DOMAINS] = {
      [IRIS_DOMAIN_RENDER_WRITE] = PIPE_CONTROL_TILE_CACHE_FLUSH,
      [IRIS_DOMAIN_DEPTH_WRITE] = PIPE_CONTROL_TILE_CACHE_FLUSH,
      [IRIS_DOMAIN_DATA_WRITE] = PIPE_CONTROL_DATA_CACHE_FLUSH,
   };
   uint32_t bits = 0;

   /* Iterate over all read/write domains first in order to handle RaW
    * and WaW dependencies, which might involve flushing the domain of
    * the previous access and invalidating the specified domain.
    */
   for (unsigned i = 0; i < IRIS_DOMAIN_OTHER_WRITE; i++) {
      assert(!iris_domain_is_read_only(i));
      assert(iris_domain_is_l3_coherent(devinfo, i));

      if (i != access) {
         const uint64_t seqno = READ_ONCE(bo->last_seqnos[i]);

         /* Invalidate unless the most recent read/write access from
          * this domain is already guaranteed to be visible to the
          * specified domain.  Flush if the most recent access from
          * this domain occurred after its most recent flush.
          */
         if (seqno > batch->coherent_seqnos[access][i]) {
            bits |= invalidate_bits[access];

            if (access_via_l3) {
               /* Both domains share L3.  If the most recent read/write access
                * in domain `i' isn't visible to L3, then flush it to L3.
                */
               if (seqno > batch->l3_coherent_seqnos[i])
                  bits |= flush_bits[i];
            } else {
               /* Domain `i' is L3 coherent but the specified domain is not.
                * Flush both this cache and L3 out to memory.
                */
               if (seqno > batch->coherent_seqnos[i][i])
                  bits |= flush_bits[i] | l3_flush_bits[i];
            }
         }
      }
   }

   /* All read-only domains can be considered mutually coherent since
    * the order of read-only memory operations is immaterial.  If the
    * specified domain is read/write we need to iterate over them too,
    * in order to handle any WaR dependencies.
    */
   if (!iris_domain_is_read_only(access)) {
      for (unsigned i = IRIS_DOMAIN_VF_READ; i < NUM_IRIS_DOMAINS; i++) {
         assert(iris_domain_is_read_only(i));
         const uint64_t seqno = READ_ONCE(bo->last_seqnos[i]);

         const uint64_t last_visible_seqno =
            iris_domain_is_l3_coherent(devinfo, i) ?
            batch->l3_coherent_seqnos[i] : batch->coherent_seqnos[i][i];

         /* Flush if the most recent access from this domain occurred
          * after its most recent flush.
          */
         if (seqno > last_visible_seqno)
            bits |= flush_bits[i];
      }
   }

   /* The IRIS_DOMAIN_OTHER_WRITE kitchen-sink domain cannot be
    * considered coherent with itself since it's really a collection
    * of multiple incoherent read/write domains, so we special-case it
    * here.
    */
   const unsigned i = IRIS_DOMAIN_OTHER_WRITE;
   const uint64_t seqno = READ_ONCE(bo->last_seqnos[i]);

   assert(!iris_domain_is_l3_coherent(devinfo, i));

   /* Invalidate unless the most recent read/write access from this
    * domain is already guaranteed to be visible to the specified
    * domain.  Flush if the most recent access from this domain
    * occurred after its most recent flush.
    */
   if (seqno > batch->coherent_seqnos[access][i]) {
      bits |= invalidate_bits[access];

      /* There is a non-L3-coherent write that isn't visible to the
       * specified domain.  If the access is via L3, then it might see
       * stale L3 data that was loaded before that write.  In this case,
       * we try to invalidate all read-only sections of the L3 cache.
       */
      if (access_via_l3 && seqno > batch->l3_coherent_seqnos[i])
         bits |= PIPE_CONTROL_L3_RO_INVALIDATE_BITS;

      if (seqno > batch->coherent_seqnos[i][i])
         bits |= flush_bits[i];
   }

   if (bits) {
      /* Stall-at-scoreboard is not expected to work in combination with other
       * flush bits.
       */
      if (bits & PIPE_CONTROL_CACHE_FLUSH_BITS)
         bits &= ~PIPE_CONTROL_STALL_AT_SCOREBOARD;

      /* Emit any required flushes and invalidations. */
      if (bits & all_flush_bits)
         iris_emit_end_of_pipe_sync(batch, "cache tracker: flush",
                                    bits & all_flush_bits);

      if (bits & ~all_flush_bits)
         iris_emit_pipe_control_flush(batch, "cache tracker: invalidate",
                                      bits & ~all_flush_bits);
   }
}

/**
 * Flush and invalidate all caches (for debugging purposes).
 */
void
iris_flush_all_caches(struct iris_batch *batch)
{
   iris_emit_pipe_control_flush(batch, "debug: flush all caches",
                                PIPE_CONTROL_CS_STALL |
                                PIPE_CONTROL_DATA_CACHE_FLUSH |
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                PIPE_CONTROL_TILE_CACHE_FLUSH |
                                PIPE_CONTROL_VF_CACHE_INVALIDATE |
                                PIPE_CONTROL_INSTRUCTION_INVALIDATE |
                                PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                PIPE_CONTROL_CONST_CACHE_INVALIDATE |
                                PIPE_CONTROL_STATE_CACHE_INVALIDATE);
}

static void
iris_texture_barrier(struct pipe_context *ctx, unsigned flags)
{
   struct iris_context *ice = (void *) ctx;
   struct iris_batch *render_batch = &ice->batches[IRIS_BATCH_RENDER];
   struct iris_batch *compute_batch = &ice->batches[IRIS_BATCH_COMPUTE];

   if (render_batch->contains_draw) {
      iris_batch_maybe_flush(render_batch, 48);
      iris_emit_pipe_control_flush(render_batch,
                                   "API: texture barrier (1/2)",
                                   PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                   PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                   PIPE_CONTROL_CS_STALL);
      iris_emit_pipe_control_flush(render_batch,
                                   "API: texture barrier (2/2)",
                                   PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
   }

   if (compute_batch->contains_draw) {
      iris_batch_maybe_flush(compute_batch, 48);
      iris_emit_pipe_control_flush(compute_batch,
                                   "API: texture barrier (1/2)",
                                   PIPE_CONTROL_CS_STALL);
      iris_emit_pipe_control_flush(compute_batch,
                                   "API: texture barrier (2/2)",
                                   PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
   }
}

static void
iris_memory_barrier(struct pipe_context *ctx, unsigned flags)
{
   struct iris_context *ice = (void *) ctx;
   unsigned bits = PIPE_CONTROL_DATA_CACHE_FLUSH | PIPE_CONTROL_CS_STALL;

   if (flags & (PIPE_BARRIER_VERTEX_BUFFER |
                PIPE_BARRIER_INDEX_BUFFER |
                PIPE_BARRIER_INDIRECT_BUFFER)) {
      bits |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
   }

   if (flags & PIPE_BARRIER_CONSTANT_BUFFER) {
      bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
              PIPE_CONTROL_CONST_CACHE_INVALIDATE;
   }

   if (flags & (PIPE_BARRIER_TEXTURE | PIPE_BARRIER_FRAMEBUFFER)) {
      bits |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
              PIPE_CONTROL_RENDER_TARGET_FLUSH |
              PIPE_CONTROL_TILE_CACHE_FLUSH;
   }

   iris_foreach_batch(ice, batch) {
      if (batch->contains_draw) {
         iris_batch_maybe_flush(batch, 24);
         iris_emit_pipe_control_flush(batch, "API: memory barrier", bits);
      }
   }
}

void
iris_init_flush_functions(struct pipe_context *ctx)
{
   ctx->memory_barrier = iris_memory_barrier;
   ctx->texture_barrier = iris_texture_barrier;
}