1 /*
2  * Copyright © 2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include <assert.h>
25 #include <stdbool.h>
26 
27 #include "anv_private.h"
28 #include "anv_measure.h"
29 #include "vk_render_pass.h"
30 #include "vk_util.h"
31 
32 #include "util/format_srgb.h"
33 
34 #include "genxml/gen_macros.h"
35 #include "genxml/genX_pack.h"
36 
37 #include "ds/intel_tracepoints.h"
38 
39 #include "genX_mi_builder.h"
40 #include "genX_cmd_draw_generated_flush.h"
41 
42 static void genX(flush_pipeline_select)(struct anv_cmd_buffer *cmd_buffer,
43                                         uint32_t pipeline);
44 
45 static enum anv_pipe_bits
46 convert_pc_to_bits(struct GENX(PIPE_CONTROL) *pc) {
47    enum anv_pipe_bits bits = 0;
48    bits |= (pc->DepthCacheFlushEnable) ?  ANV_PIPE_DEPTH_CACHE_FLUSH_BIT : 0;
49    bits |= (pc->DCFlushEnable) ?  ANV_PIPE_DATA_CACHE_FLUSH_BIT : 0;
50 #if GFX_VERx10 >= 125
51    bits |= (pc->PSSStallSyncEnable) ?  ANV_PIPE_PSS_STALL_SYNC_BIT : 0;
52 #endif
53 #if GFX_VER == 12
54    bits |= (pc->TileCacheFlushEnable) ?  ANV_PIPE_TILE_CACHE_FLUSH_BIT : 0;
55    bits |= (pc->L3FabricFlush) ?  ANV_PIPE_L3_FABRIC_FLUSH_BIT : 0;
56 #endif
57 #if GFX_VER >= 12
58    bits |= (pc->HDCPipelineFlushEnable) ?  ANV_PIPE_HDC_PIPELINE_FLUSH_BIT : 0;
59 #endif
60    bits |= (pc->RenderTargetCacheFlushEnable) ?  ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT : 0;
61    bits |= (pc->VFCacheInvalidationEnable) ?  ANV_PIPE_VF_CACHE_INVALIDATE_BIT : 0;
62    bits |= (pc->StateCacheInvalidationEnable) ?  ANV_PIPE_STATE_CACHE_INVALIDATE_BIT : 0;
63    bits |= (pc->ConstantCacheInvalidationEnable) ?  ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT : 0;
64    bits |= (pc->TextureCacheInvalidationEnable) ?  ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT : 0;
65    bits |= (pc->InstructionCacheInvalidateEnable) ?  ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT : 0;
66    bits |= (pc->StallAtPixelScoreboard) ?  ANV_PIPE_STALL_AT_SCOREBOARD_BIT : 0;
67    bits |= (pc->DepthStallEnable) ?  ANV_PIPE_DEPTH_STALL_BIT : 0;
68    bits |= (pc->CommandStreamerStallEnable) ?  ANV_PIPE_CS_STALL_BIT : 0;
69 #if GFX_VERx10 == 125
70    bits |= (pc->UntypedDataPortCacheFlushEnable) ? ANV_PIPE_UNTYPED_DATAPORT_CACHE_FLUSH_BIT : 0;
71    bits |= (pc->CCSFlushEnable) ? ANV_PIPE_CCS_CACHE_FLUSH_BIT : 0;
72 #endif
73    return bits;
74 }
75 
76 #define anv_debug_dump_pc(pc, reason) \
77    if (INTEL_DEBUG(DEBUG_PIPE_CONTROL)) { \
78       fputs("pc : emit PC=( ", stdout); \
79       anv_dump_pipe_bits(convert_pc_to_bits(&(pc)), stdout);   \
80       fprintf(stdout, ") reason: %s\n", reason); \
81    }
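/* Illustrative usage sketch (not from the original source): the macro above
 * is meant to be invoked on a filled-out PIPE_CONTROL struct, right before it
 * is packed into the batch, with a free-form reason string:
 *
 *    struct GENX(PIPE_CONTROL) pc = {
 *       .CommandStreamerStallEnable = true,
 *       .RenderTargetCacheFlushEnable = true,
 *    };
 *    anv_debug_dump_pc(pc, "example: RT flush + CS stall");
 *
 * The field names are the same ones convert_pc_to_bits() reads above, and the
 * output is only produced when the pipe-control debug flag is enabled.
 */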
82 
83 static inline void
84 fill_state_base_addr(struct anv_cmd_buffer *cmd_buffer,
85                      struct GENX(STATE_BASE_ADDRESS) *sba)
86 {
87    struct anv_device *device = cmd_buffer->device;
88    const uint32_t mocs = isl_mocs(&device->isl_dev, 0, false);
89 
90    /* If no API entry point selected the current mode (this can happen
91     * depending on the first operation recorded in the command buffer),
92     * select BUFFER if EXT_descriptor_buffer is enabled, otherwise LEGACY.
93     */
94    if (cmd_buffer->state.pending_db_mode ==
95        ANV_CMD_DESCRIPTOR_BUFFER_MODE_UNKNOWN) {
96       cmd_buffer->state.pending_db_mode =
97          cmd_buffer->device->vk.enabled_extensions.EXT_descriptor_buffer ?
98          ANV_CMD_DESCRIPTOR_BUFFER_MODE_BUFFER :
99          ANV_CMD_DESCRIPTOR_BUFFER_MODE_LEGACY;
100    }
101 
102    *sba = (struct GENX(STATE_BASE_ADDRESS)) { GENX(STATE_BASE_ADDRESS_header), };
103 
104    sba->GeneralStateBaseAddress = (struct anv_address) { NULL, 0 };
105    sba->GeneralStateMOCS = mocs;
106    sba->GeneralStateBufferSize = 0xfffff;
107    sba->GeneralStateBaseAddressModifyEnable = true;
108    sba->GeneralStateBufferSizeModifyEnable = true;
109 
110 #if GFX_VERx10 == 120
111    /* Since DG2, scratch surfaces have their own surface state with its own
112     * MOCS setting, but prior to that, the MOCS for scratch accesses are
113     * governed by SBA.StatelessDataPortAccessMOCS.
114     */
115    const isl_surf_usage_flags_t protected_usage =
116       cmd_buffer->vk.pool->flags & VK_COMMAND_POOL_CREATE_PROTECTED_BIT ?
117       ISL_SURF_USAGE_PROTECTED_BIT : 0;
118    const uint32_t stateless_mocs = isl_mocs(&device->isl_dev, protected_usage, false);
119 #else
120    const uint32_t stateless_mocs = mocs;
121 #endif
122 
123    sba->StatelessDataPortAccessMOCS = stateless_mocs;
124 
125 #if GFX_VERx10 >= 125
126    sba->SurfaceStateBaseAddress =
127       (struct anv_address) { .offset =
128                              device->physical->va.internal_surface_state_pool.addr,
129    };
130 #else
131    sba->SurfaceStateBaseAddress =
132       anv_cmd_buffer_surface_base_address(cmd_buffer);
133 #endif
134    sba->SurfaceStateMOCS = mocs;
135    sba->SurfaceStateBaseAddressModifyEnable = true;
136 
137    sba->IndirectObjectBaseAddress = (struct anv_address) { NULL, 0 };
138    sba->IndirectObjectMOCS = mocs;
139    sba->IndirectObjectBufferSize = 0xfffff;
140    sba->IndirectObjectBaseAddressModifyEnable = true;
141    sba->IndirectObjectBufferSizeModifyEnable  = true;
142 
143    sba->InstructionBaseAddress =
144       (struct anv_address) { device->instruction_state_pool.block_pool.bo, 0 };
145    sba->InstructionMOCS = mocs;
146    sba->InstructionBufferSize =
147       device->physical->va.instruction_state_pool.size / 4096;
148    sba->InstructionBaseAddressModifyEnable = true;
149    sba->InstructionBuffersizeModifyEnable = true;
150 
151 #if GFX_VER >= 11
152    sba->BindlessSamplerStateBaseAddress = ANV_NULL_ADDRESS;
153    sba->BindlessSamplerStateBufferSize = 0;
154    sba->BindlessSamplerStateMOCS = mocs;
155    sba->BindlessSamplerStateBaseAddressModifyEnable = true;
156 #endif
157 
158    sba->DynamicStateBaseAddress = (struct anv_address) {
159       .offset = device->physical->va.dynamic_state_pool.addr,
160    };
161    sba->DynamicStateBufferSize =
162       (device->physical->va.dynamic_state_pool.size +
163        device->physical->va.dynamic_visible_pool.size +
164        device->physical->va.push_descriptor_buffer_pool.size) / 4096;
165    sba->DynamicStateMOCS = mocs;
166    sba->DynamicStateBaseAddressModifyEnable = true;
167    sba->DynamicStateBufferSizeModifyEnable = true;
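   /* Note (assumption, not from the original source): the buffer size above
    * covers the dynamic state, dynamic visible and push descriptor buffer
    * pools together, presumably because they sit back-to-back in the
    * driver's virtual address layout so one range can span all three.
    */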
168 
169    if (cmd_buffer->state.pending_db_mode == ANV_CMD_DESCRIPTOR_BUFFER_MODE_BUFFER) {
170 #if GFX_VERx10 >= 125
171       sba->BindlessSurfaceStateBaseAddress = (struct anv_address) {
172          .offset = device->physical->va.dynamic_visible_pool.addr,
173       };
174       sba->BindlessSurfaceStateSize =
175          (device->physical->va.dynamic_visible_pool.size +
176           device->physical->va.push_descriptor_buffer_pool.size) - 1;
177       sba->BindlessSurfaceStateMOCS = mocs;
178       sba->BindlessSurfaceStateBaseAddressModifyEnable = true;
179 #else
180       const uint64_t surfaces_addr =
181          cmd_buffer->state.descriptor_buffers.surfaces_address != 0 ?
182          cmd_buffer->state.descriptor_buffers.surfaces_address :
183          anv_address_physical(device->workaround_address);
184       const uint64_t surfaces_size =
185          cmd_buffer->state.descriptor_buffers.surfaces_address != 0 ?
186          MIN2(device->physical->va.dynamic_visible_pool.size -
187               (cmd_buffer->state.descriptor_buffers.surfaces_address -
188                device->physical->va.dynamic_visible_pool.addr),
189               anv_physical_device_bindless_heap_size(device->physical, true)) :
190          (device->workaround_bo->size - device->workaround_address.offset);
191       sba->BindlessSurfaceStateBaseAddress = (struct anv_address) {
192          .offset = surfaces_addr,
193       };
194       sba->BindlessSurfaceStateSize = surfaces_size / ANV_SURFACE_STATE_SIZE - 1;
195       sba->BindlessSurfaceStateMOCS = mocs;
196       sba->BindlessSurfaceStateBaseAddressModifyEnable = true;
197 #endif /* GFX_VERx10 < 125 */
198    } else if (!device->physical->indirect_descriptors) {
199 #if GFX_VERx10 >= 125
200       sba->BindlessSurfaceStateBaseAddress = (struct anv_address) {
201          .offset = device->physical->va.internal_surface_state_pool.addr,
202       };
203       sba->BindlessSurfaceStateSize =
204          (device->physical->va.internal_surface_state_pool.size +
205           device->physical->va.bindless_surface_state_pool.size) - 1;
206       sba->BindlessSurfaceStateMOCS = mocs;
207       sba->BindlessSurfaceStateBaseAddressModifyEnable = true;
208 #else
209       unreachable("Direct descriptor not supported");
210 #endif
211    } else {
212       sba->BindlessSurfaceStateBaseAddress =
213          (struct anv_address) { .offset =
214                                 device->physical->va.bindless_surface_state_pool.addr,
215       };
216       sba->BindlessSurfaceStateSize =
217          anv_physical_device_bindless_heap_size(device->physical, false) /
218          ANV_SURFACE_STATE_SIZE - 1;
219       sba->BindlessSurfaceStateMOCS = mocs;
220       sba->BindlessSurfaceStateBaseAddressModifyEnable = true;
221    }
222 
223 #if GFX_VERx10 >= 125
224    sba->L1CacheControl = L1CC_WB;
225 #endif
226 }
227 
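/* fill_state_base_addr() above only computes the packet contents; the
 * function below is what actually emits it, wrapping STATE_BASE_ADDRESS in
 * the flushes and invalidations the hardware requires around an SBA change.
 */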
228 void
229 genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
230 {
231    if (anv_cmd_buffer_is_blitter_queue(cmd_buffer) ||
232        anv_cmd_buffer_is_video_queue(cmd_buffer))
233       return;
234 
235    struct anv_device *device = cmd_buffer->device;
236 
237    struct GENX(STATE_BASE_ADDRESS) sba = {};
238    fill_state_base_addr(cmd_buffer, &sba);
239 
240 #if GFX_VERx10 >= 125
241    struct mi_builder b;
242    mi_builder_init(&b, device->info, &cmd_buffer->batch);
243    mi_builder_set_mocs(&b, isl_mocs(&device->isl_dev, 0, false));
244    struct mi_goto_target t = MI_GOTO_TARGET_INIT;
245    mi_goto_if(&b,
246               mi_ieq(&b, mi_reg64(ANV_BINDLESS_SURFACE_BASE_ADDR_REG),
247                          mi_imm(sba.BindlessSurfaceStateBaseAddress.offset)),
248               &t);
249 #endif
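   /* On GFX_VERx10 >= 125, the MI commands above compare the bindless
    * surface base address we are about to program against the value last
    * stored in ANV_BINDLESS_SURFACE_BASE_ADDR_REG and, when they match,
    * jump ahead to the mi_goto_target() below, skipping the flushes, the
    * STATE_BASE_ADDRESS packet and the invalidations that follow.
    */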
250 
251    /* Emit a render target cache flush.
252     *
253     * This isn't documented anywhere in the PRM.  However, it seems to be
254     * necessary prior to changing the surface state base address.  Without
255     * this, we get GPU hangs when using multi-level command buffers which
256     * clear depth, reset state base address, and then go render stuff.
257     *
258     * Render target cache flush before SBA is required by Wa_18039438632.
259     */
260    genx_batch_emit_pipe_control(&cmd_buffer->batch, device->info,
261                                 cmd_buffer->state.current_pipeline,
262 #if GFX_VER >= 12
263                                 ANV_PIPE_HDC_PIPELINE_FLUSH_BIT |
264 #else
265                                 ANV_PIPE_DATA_CACHE_FLUSH_BIT |
266 #endif
267                                 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
268                                 ANV_PIPE_CS_STALL_BIT);
269 
270 #if INTEL_NEEDS_WA_1607854226
271    /* Wa_1607854226:
272     *
273     *  Workaround the non pipelined state not applying in MEDIA/GPGPU pipeline
274     *  mode by putting the pipeline temporarily in 3D mode.
275     */
276    uint32_t gfx12_wa_pipeline = cmd_buffer->state.current_pipeline;
277    genX(flush_pipeline_select_3d)(cmd_buffer);
278 #endif
279 
280    anv_batch_emit(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS), _sba) {
281       _sba = sba;
282    }
283 
284    if (cmd_buffer->state.current_db_mode != cmd_buffer->state.pending_db_mode)
285       cmd_buffer->state.current_db_mode = cmd_buffer->state.pending_db_mode;
286 
287 #if INTEL_NEEDS_WA_1607854226
288    /* Wa_1607854226:
289     *
290     *  Put the pipeline back into its current mode.
291     */
292    if (gfx12_wa_pipeline != UINT32_MAX)
293       genX(flush_pipeline_select)(cmd_buffer, gfx12_wa_pipeline);
294 #endif
295 
296    /* After re-setting the surface state base address, we have to do some
297     * cache flushing so that the sampler engine will pick up the new
298     * SURFACE_STATE objects and binding tables. From the Broadwell PRM,
299     * Shared Function > 3D Sampler > State > State Caching (page 96):
300     *
301     *    Coherency with system memory in the state cache, like the texture
302     *    cache is handled partially by software. It is expected that the
303     *    command stream or shader will issue Cache Flush operation or
304     *    Cache_Flush sampler message to ensure that the L1 cache remains
305     *    coherent with system memory.
306     *
307     *    [...]
308     *
309     *    Whenever the value of the Dynamic_State_Base_Addr,
310     *    Surface_State_Base_Addr are altered, the L1 state cache must be
311     *    invalidated to ensure the new surface or sampler state is fetched
312     *    from system memory.
313     *
314     * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
315     * which, according to the PIPE_CONTROL instruction documentation in the
316     * Broadwell PRM:
317     *
318     *    Setting this bit is independent of any other bit in this packet.
319     *    This bit controls the invalidation of the L1 and L2 state caches
320     *    at the top of the pipe i.e. at the parsing time.
321     *
322     * Unfortunately, experimentation seems to indicate that state cache
323     * invalidation through a PIPE_CONTROL does nothing whatsoever in
324     * regards to surface state and binding tables.  Instead, it seems that
325     * invalidating the texture cache is what is actually needed.
326     *
327     * XXX:  As far as we have been able to determine through
328     * experimentation, flushing the texture cache appears to be
329     * sufficient.  The theory here is that all of the sampling/rendering
330     * units cache the binding table in the texture cache.  However, we have
331     * yet to be able to actually confirm this.
332     *
333     * Wa_14013910100:
334     *
335     *  "DG2 128/256/512-A/B: S/W must program STATE_BASE_ADDRESS command twice
336     *   or program pipe control with Instruction cache invalidate post
337     *   STATE_BASE_ADDRESS command"
338     */
339    enum anv_pipe_bits bits =
340       ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
341       ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT |
342       ANV_PIPE_STATE_CACHE_INVALIDATE_BIT |
343       (intel_needs_workaround(device->info, 16013000631) ?
344        ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT : 0);
345 
346    genx_batch_emit_pipe_control(&cmd_buffer->batch, device->info,
347                                 cmd_buffer->state.current_pipeline,
348                                 bits);
349 
350    assert(cmd_buffer->state.current_db_mode !=
351           ANV_CMD_DESCRIPTOR_BUFFER_MODE_UNKNOWN);
352 
353 #if GFX_VERx10 >= 125
354    assert(sba.BindlessSurfaceStateBaseAddress.offset != 0);
355    mi_store(&b, mi_reg64(ANV_BINDLESS_SURFACE_BASE_ADDR_REG),
356                 mi_imm(sba.BindlessSurfaceStateBaseAddress.offset));
357 
358    mi_goto_target(&b, &t);
359 #endif
360 
361 #if GFX_VERx10 >= 125
362    genX(cmd_buffer_emit_bt_pool_base_address)(cmd_buffer);
363 #endif
364 
365    /* If we have emitted a new state base address we probably need to re-emit
366     * binding tables.
367     */
368    cmd_buffer->state.descriptors_dirty |= ~0;
369 }
370 
371 void
372 genX(cmd_buffer_emit_bt_pool_base_address)(struct anv_cmd_buffer *cmd_buffer)
373 {
374    if (!anv_cmd_buffer_is_render_or_compute_queue(cmd_buffer))
375       return;
376 
377    /* If we are emitting a new state base address we probably need to re-emit
378     * binding tables.
379     */
380    cmd_buffer->state.descriptors_dirty |= ~0;
381 
382 #if GFX_VERx10 >= 125
383    struct anv_device *device = cmd_buffer->device;
384    const uint32_t mocs = isl_mocs(&device->isl_dev, 0, false);
385 
386    /* We're changing the base location of binding tables, which affects the state
387     * cache. We're adding texture cache invalidation following a
388     * recommendation from the ICL PRMs, Volume 9: Render Engine, Coherency
389     * Mechanisms:
390     *
391     *    "It is strongly recommended that a Texture cache invalidation be done
392     *     whenever a State cache invalidation is done."
393     *
394     * Prior to doing the invalidation, we need a CS_STALL to ensure that all work
395     * using surface states has completed.
396     */
397    genx_batch_emit_pipe_control(&cmd_buffer->batch,
398                                 cmd_buffer->device->info,
399                                 cmd_buffer->state.current_pipeline,
400                                 ANV_PIPE_CS_STALL_BIT);
401    anv_batch_emit(
402       &cmd_buffer->batch, GENX(3DSTATE_BINDING_TABLE_POOL_ALLOC), btpa) {
403       btpa.BindingTablePoolBaseAddress =
404          anv_cmd_buffer_surface_base_address(cmd_buffer);
405       btpa.BindingTablePoolBufferSize = device->physical->va.binding_table_pool.size / 4096;
406       btpa.MOCS = mocs;
407    }
408    genx_batch_emit_pipe_control(&cmd_buffer->batch,
409                                 cmd_buffer->device->info,
410                                 cmd_buffer->state.current_pipeline,
411                                 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
412                                 ANV_PIPE_STATE_CACHE_INVALIDATE_BIT);
413 
414 #else /* GFX_VERx10 < 125 */
415    genX(cmd_buffer_emit_state_base_address)(cmd_buffer);
416 #endif
417 }
418 
419 static void
420 add_surface_reloc(struct anv_cmd_buffer *cmd_buffer,
421                   struct anv_address addr)
422 {
423    VkResult result = anv_reloc_list_add_bo(&cmd_buffer->surface_relocs,
424                                            addr.bo);
425 
426    if (unlikely(result != VK_SUCCESS))
427       anv_batch_set_error(&cmd_buffer->batch, result);
428 }
429 
430 static void
431 add_surface_state_relocs(struct anv_cmd_buffer *cmd_buffer,
432                          const struct anv_surface_state *state)
433 {
434    assert(!anv_address_is_null(state->address));
435    add_surface_reloc(cmd_buffer, state->address);
436 
437    if (!anv_address_is_null(state->aux_address)) {
438       VkResult result =
439          anv_reloc_list_add_bo(&cmd_buffer->surface_relocs,
440                                state->aux_address.bo);
441       if (result != VK_SUCCESS)
442          anv_batch_set_error(&cmd_buffer->batch, result);
443    }
444 
445    if (!anv_address_is_null(state->clear_address)) {
446       VkResult result =
447          anv_reloc_list_add_bo(&cmd_buffer->surface_relocs,
448                                state->clear_address.bo);
449       if (result != VK_SUCCESS)
450          anv_batch_set_error(&cmd_buffer->batch, result);
451    }
452 }
453 
454 /* Transitions a HiZ-enabled depth buffer from one layout to another. Unless
455  * the initial layout is undefined, the HiZ buffer and depth buffer will
456  * represent the same data at the end of this operation.
457  */
458 static void
459 transition_depth_buffer(struct anv_cmd_buffer *cmd_buffer,
460                         const struct anv_image *image,
461                         uint32_t base_level, uint32_t level_count,
462                         uint32_t base_layer, uint32_t layer_count,
463                         VkImageLayout initial_layout,
464                         VkImageLayout final_layout,
465                         bool will_full_fast_clear)
466 {
467    const uint32_t depth_plane =
468       anv_image_aspect_to_plane(image, VK_IMAGE_ASPECT_DEPTH_BIT);
469    if (image->planes[depth_plane].aux_usage == ISL_AUX_USAGE_NONE)
470       return;
471 
472    /* Initialize the indirect clear color prior to first use. */
473    const enum isl_format depth_format =
474       image->planes[depth_plane].primary_surface.isl.format;
475    const struct anv_address clear_color_addr =
476       anv_image_get_clear_color_addr(cmd_buffer->device, image, depth_format,
477                                      VK_IMAGE_ASPECT_DEPTH_BIT, true);
478    if (!anv_address_is_null(clear_color_addr) &&
479        (initial_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
480         initial_layout == VK_IMAGE_LAYOUT_PREINITIALIZED)) {
481       const union isl_color_value clear_value =
482          anv_image_hiz_clear_value(image);
483 
484       uint32_t depth_value[4] = {};
485       isl_color_value_pack(&clear_value, depth_format, depth_value);
486 
487       const uint32_t clear_pixel_offset = clear_color_addr.offset +
488          isl_get_sampler_clear_field_offset(cmd_buffer->device->info,
489                                             depth_format);
490       const struct anv_address clear_pixel_addr = {
491          .bo = clear_color_addr.bo,
492          .offset = clear_pixel_offset,
493       };
494 
495       struct mi_builder b;
496       mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
497       mi_builder_set_write_check(&b, true);
498       mi_store(&b, mi_mem32(clear_pixel_addr), mi_imm(depth_value[0]));
499    }
500 
501    /* If will_full_fast_clear is set, the caller promises to fast-clear the
502     * largest portion of the specified range that it can.
503     */
504    if (will_full_fast_clear)
505       return;
506 
507    const enum isl_aux_state initial_state =
508       anv_layout_to_aux_state(cmd_buffer->device->info, image,
509                               VK_IMAGE_ASPECT_DEPTH_BIT,
510                               initial_layout,
511                               cmd_buffer->queue_family->queueFlags);
512    const enum isl_aux_state final_state =
513       anv_layout_to_aux_state(cmd_buffer->device->info, image,
514                               VK_IMAGE_ASPECT_DEPTH_BIT,
515                               final_layout,
516                               cmd_buffer->queue_family->queueFlags);
517 
518    const bool initial_depth_valid =
519       isl_aux_state_has_valid_primary(initial_state);
520    const bool initial_hiz_valid =
521       isl_aux_state_has_valid_aux(initial_state);
522    const bool final_needs_depth =
523       isl_aux_state_has_valid_primary(final_state);
524    const bool final_needs_hiz =
525       isl_aux_state_has_valid_aux(final_state);
526 
527    /* Getting into the pass-through state for Depth is tricky and involves
528     * both a resolve and an ambiguate.  We don't handle that state right now
529     * as anv_layout_to_aux_state never returns it.
530     */
531    assert(final_state != ISL_AUX_STATE_PASS_THROUGH);
532 
533    enum isl_aux_op hiz_op = ISL_AUX_OP_NONE;
534    if (final_needs_depth && !initial_depth_valid) {
535       assert(initial_hiz_valid);
536       hiz_op = ISL_AUX_OP_FULL_RESOLVE;
537    } else if (final_needs_hiz && !initial_hiz_valid) {
538       assert(initial_depth_valid);
539       hiz_op = ISL_AUX_OP_AMBIGUATE;
540    }
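   /* In short, based on the aux state checks above: when depth data is
    * needed but not valid, a FULL_RESOLVE writes the HiZ contents back into
    * the depth surface; when HiZ is needed but not valid, an AMBIGUATE
    * reinitializes HiZ so it is consistent with the current depth contents.
    */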
541 
542    if (hiz_op != ISL_AUX_OP_NONE) {
543       for (uint32_t l = 0; l < level_count; l++) {
544          const uint32_t level = base_level + l;
545 
546          uint32_t aux_layers =
547             anv_image_aux_layers(image, VK_IMAGE_ASPECT_DEPTH_BIT, level);
548          if (base_layer >= aux_layers)
549             break; /* We will only get fewer layers as level increases */
550          uint32_t level_layer_count =
551             MIN2(layer_count, aux_layers - base_layer);
552 
553          anv_image_hiz_op(cmd_buffer, image, VK_IMAGE_ASPECT_DEPTH_BIT,
554                           level, base_layer, level_layer_count, hiz_op);
555       }
556    }
557 
558    /* Additional tile cache flush for MTL:
559     *
560     * https://gitlab.freedesktop.org/mesa/mesa/-/issues/10420
561     * https://gitlab.freedesktop.org/mesa/mesa/-/issues/10530
562     */
563    if (intel_device_info_is_mtl(cmd_buffer->device->info) &&
564        image->planes[depth_plane].aux_usage == ISL_AUX_USAGE_HIZ_CCS &&
565        final_needs_depth && !initial_depth_valid) {
566       anv_add_pending_pipe_bits(cmd_buffer,
567                                 ANV_PIPE_TILE_CACHE_FLUSH_BIT,
568                                 "HIZ-CCS flush");
569    }
570 }
571 
572 /* Transitions a stencil buffer from one layout to another. On GFX12, the
573  * stencil buffer may be CCS-compressed via the AUX table and needs to be
574  * initialized with a stencil clear when coming from an undefined layout.
575  */
576 static void
577 transition_stencil_buffer(struct anv_cmd_buffer *cmd_buffer,
578                           const struct anv_image *image,
579                           uint32_t base_level, uint32_t level_count,
580                           uint32_t base_layer, uint32_t layer_count,
581                           VkImageLayout initial_layout,
582                           VkImageLayout final_layout,
583                           bool will_full_fast_clear)
584 {
585 #if GFX_VER == 12
586    const uint32_t plane =
587       anv_image_aspect_to_plane(image, VK_IMAGE_ASPECT_STENCIL_BIT);
588    if (image->planes[plane].aux_usage == ISL_AUX_USAGE_NONE)
589       return;
590 
591    if ((initial_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
592         initial_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) &&
593        cmd_buffer->device->info->has_aux_map) {
594       /* If will_full_fast_clear is set, the caller promises to fast-clear the
595        * largest portion of the specified range that it can.
596        */
597       if (will_full_fast_clear)
598          return;
599 
600       for (uint32_t l = 0; l < level_count; l++) {
601          const uint32_t level = base_level + l;
602          const VkRect2D clear_rect = {
603             .offset.x = 0,
604             .offset.y = 0,
605             .extent.width = u_minify(image->vk.extent.width, level),
606             .extent.height = u_minify(image->vk.extent.height, level),
607          };
608 
609          uint32_t aux_layers =
610             anv_image_aux_layers(image, VK_IMAGE_ASPECT_STENCIL_BIT, level);
611 
612          if (base_layer >= aux_layers)
613             break; /* We will only get fewer layers as level increases */
614 
615          uint32_t level_layer_count =
616             MIN2(layer_count, aux_layers - base_layer);
617 
618          /* From Bspec's 3DSTATE_STENCIL_BUFFER_BODY > Stencil Compression
619           * Enable:
620           *
621           *    "When enabled, Stencil Buffer needs to be initialized via
622           *    stencil clear (HZ_OP) before any renderpass."
623           */
624          const VkClearDepthStencilValue clear_value = {};
625          anv_image_hiz_clear(cmd_buffer, image, VK_IMAGE_ASPECT_STENCIL_BIT,
626                              level, base_layer, level_layer_count,
627                              clear_rect, &clear_value);
628       }
629    }
630 
631    /* Additional tile cache flush for MTL:
632     *
633     * https://gitlab.freedesktop.org/mesa/mesa/-/issues/10420
634     * https://gitlab.freedesktop.org/mesa/mesa/-/issues/10530
635     */
636    if (intel_device_info_is_mtl(cmd_buffer->device->info)) {
637       anv_add_pending_pipe_bits(cmd_buffer,
638                                 ANV_PIPE_TILE_CACHE_FLUSH_BIT,
639                                 "HIZ-CCS flush");
640    }
641 #endif
642 }
643 
644 #define MI_PREDICATE_SRC0    0x2400
645 #define MI_PREDICATE_SRC1    0x2408
646 #define MI_PREDICATE_RESULT  0x2418
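/* These are the command streamer MMIO offsets of the predicate source and
 * result registers: MI stores / MI math results are written into SRC0/SRC1
 * and the MI_PREDICATE command below combines them into MI_PREDICATE_RESULT,
 * which gates subsequent predicated commands.
 */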
647 
648 static void
649 set_image_compressed_bit(struct anv_cmd_buffer *cmd_buffer,
650                          const struct anv_image *image,
651                          VkImageAspectFlagBits aspect,
652                          uint32_t level,
653                          uint32_t base_layer, uint32_t layer_count,
654                          bool compressed)
655 {
656    const uint32_t plane = anv_image_aspect_to_plane(image, aspect);
657 
658    /* We only have compression tracking for CCS_E */
659    if (!isl_aux_usage_has_ccs_e(image->planes[plane].aux_usage))
660       return;
661 
662    struct anv_device *device = cmd_buffer->device;
663    struct mi_builder b;
664    mi_builder_init(&b, device->info, &cmd_buffer->batch);
665    mi_builder_set_mocs(&b, isl_mocs(&device->isl_dev, 0, false));
666 
667    for (uint32_t a = 0; a < layer_count; a++) {
668       uint32_t layer = base_layer + a;
669       struct anv_address comp_state_addr =
670          anv_image_get_compression_state_addr(device,
671                                               image, aspect,
672                                               level, layer);
673       mi_store(&b, mi_mem32(comp_state_addr),
674                    mi_imm(compressed ? UINT32_MAX : 0));
675    }
676 
677    /* FCV_CCS_E images are automatically fast cleared to the default value at
678     * render time. In order to account for this, anv should set the
679     * appropriate fast clear state for level0/layer0.
680     *
681     * At the moment, tracking the fast clear state for higher levels/layers is
682     * neither supported, nor do we enter a situation where it is a concern.
683     */
684    if (image->planes[plane].aux_usage == ISL_AUX_USAGE_FCV_CCS_E &&
685        base_layer == 0 && level == 0) {
686       struct anv_address fc_type_addr =
687          anv_image_get_fast_clear_type_addr(device, image, aspect);
688       mi_store(&b, mi_mem32(fc_type_addr),
689                    mi_imm(ANV_FAST_CLEAR_DEFAULT_VALUE));
690    }
691 }
692 
693 static void
694 set_image_fast_clear_state(struct anv_cmd_buffer *cmd_buffer,
695                            const struct anv_image *image,
696                            VkImageAspectFlagBits aspect,
697                            enum anv_fast_clear_type fast_clear)
698 {
699    struct anv_device *device = cmd_buffer->device;
700    struct mi_builder b;
701    mi_builder_init(&b, device->info, &cmd_buffer->batch);
702    mi_builder_set_mocs(&b, isl_mocs(&device->isl_dev, 0, false));
703 
704    struct anv_address fc_type_addr =
705       anv_image_get_fast_clear_type_addr(device, image, aspect);
706    mi_store(&b, mi_mem32(fc_type_addr), mi_imm(fast_clear));
707 
708    /* Whenever we have fast-clear, we consider that slice to be compressed.
709     * This makes building predicates much easier.
710     */
711    if (fast_clear != ANV_FAST_CLEAR_NONE)
712       set_image_compressed_bit(cmd_buffer, image, aspect, 0, 0, 1, true);
713 }
714 
715 /* This is only really practical on Haswell and above because it requires
716  * MI math in order to get it correct.
717  */
718 static void
719 anv_cmd_compute_resolve_predicate(struct anv_cmd_buffer *cmd_buffer,
720                                   const struct anv_image *image,
721                                   VkImageAspectFlagBits aspect,
722                                   uint32_t level, uint32_t array_layer,
723                                   enum isl_aux_op resolve_op,
724                                   enum anv_fast_clear_type fast_clear_supported)
725 {
726    struct anv_device *device = cmd_buffer->device;
727    struct anv_address addr =
728       anv_image_get_fast_clear_type_addr(device, image, aspect);
729    struct mi_builder b;
730    mi_builder_init(&b, device->info, &cmd_buffer->batch);
731    mi_builder_set_mocs(&b, isl_mocs(&device->isl_dev, 0, false));
732 
733    const struct mi_value fast_clear_type = mi_mem32(addr);
734 
735    if (resolve_op == ISL_AUX_OP_FULL_RESOLVE) {
736       /* In this case, we're doing a full resolve which means we want the
737        * resolve to happen if any compression (including fast-clears) is
738        * present.
739        *
740        * In order to simplify the logic a bit, we make the assumption that,
741        * if the first slice has been fast-cleared, it is also marked as
742        * compressed.  See also set_image_fast_clear_state.
743        */
744       const struct mi_value compression_state =
745          mi_mem32(anv_image_get_compression_state_addr(device,
746                                                        image, aspect,
747                                                        level, array_layer));
748       mi_store(&b, mi_reg64(MI_PREDICATE_SRC0), compression_state);
749       mi_store(&b, compression_state, mi_imm(0));
750 
751       if (level == 0 && array_layer == 0) {
752          /* If the predicate is true, we want to write 0 to the fast clear type
753           * and, if it's false, leave it alone.  We can do this by writing
754           *
755           * clear_type = clear_type & ~predicate;
756           */
757          struct mi_value new_fast_clear_type =
758             mi_iand(&b, fast_clear_type,
759                         mi_inot(&b, mi_reg64(MI_PREDICATE_SRC0)));
760          mi_store(&b, fast_clear_type, new_fast_clear_type);
761       }
762    } else if (level == 0 && array_layer == 0) {
763       /* In this case, we are doing a partial resolve to get rid of fast-clear
764        * colors.  We don't care about the compression state but we do care
765        * about how much fast clear is allowed by the final layout.
766        */
767       assert(resolve_op == ISL_AUX_OP_PARTIAL_RESOLVE);
768       assert(fast_clear_supported < ANV_FAST_CLEAR_ANY);
769 
770       /* We need to compute (fast_clear_supported < image->fast_clear) */
771       struct mi_value pred =
772          mi_ult(&b, mi_imm(fast_clear_supported), fast_clear_type);
773       mi_store(&b, mi_reg64(MI_PREDICATE_SRC0), mi_value_ref(&b, pred));
774 
775       /* If the predicate is true, we want to write 0 to the fast clear type
776        * and, if it's false, leave it alone.  We can do this by writing
777        *
778        * clear_type = clear_type & ~predicate;
779        */
780       struct mi_value new_fast_clear_type =
781          mi_iand(&b, fast_clear_type, mi_inot(&b, pred));
782       mi_store(&b, fast_clear_type, new_fast_clear_type);
783    } else {
784       /* In this case, we're trying to do a partial resolve on a slice that
785        * doesn't have clear color.  There's nothing to do.
786        */
787       assert(resolve_op == ISL_AUX_OP_PARTIAL_RESOLVE);
788       return;
789    }
790 
791    /* Set src1 to 0 and use a != condition */
792    mi_store(&b, mi_reg64(MI_PREDICATE_SRC1), mi_imm(0));
793 
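   /* With SRC1 == 0, LOADINV + COMPARE_SRCS_EQUAL sets
    * MI_PREDICATE_RESULT = !(SRC0 == 0), i.e. the predicate fires whenever
    * SRC0 holds a non-zero compression / fast-clear value, which is what
    * the predicated resolve operations issued by the callers key off.
    */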
794    anv_batch_emit(&cmd_buffer->batch, GENX(MI_PREDICATE), mip) {
795       mip.LoadOperation    = LOAD_LOADINV;
796       mip.CombineOperation = COMBINE_SET;
797       mip.CompareOperation = COMPARE_SRCS_EQUAL;
798    }
799 }
800 
801 static void
802 anv_cmd_predicated_ccs_resolve(struct anv_cmd_buffer *cmd_buffer,
803                                const struct anv_image *image,
804                                enum isl_format format,
805                                struct isl_swizzle swizzle,
806                                VkImageAspectFlagBits aspect,
807                                uint32_t level, uint32_t array_layer,
808                                enum isl_aux_op resolve_op,
809                                enum anv_fast_clear_type fast_clear_supported)
810 {
811    const uint32_t plane = anv_image_aspect_to_plane(image, aspect);
812 
813    anv_cmd_compute_resolve_predicate(cmd_buffer, image,
814                                      aspect, level, array_layer,
815                                      resolve_op, fast_clear_supported);
816 
817    /* CCS_D only supports full resolves and BLORP will assert on us if we try
818     * to do a partial resolve on a CCS_D surface.
819     */
820    if (resolve_op == ISL_AUX_OP_PARTIAL_RESOLVE &&
821        image->planes[plane].aux_usage == ISL_AUX_USAGE_CCS_D)
822       resolve_op = ISL_AUX_OP_FULL_RESOLVE;
823 
824    anv_image_ccs_op(cmd_buffer, image, format, swizzle, aspect,
825                     level, array_layer, 1, resolve_op, NULL, true);
826 }
827 
828 static void
829 anv_cmd_predicated_mcs_resolve(struct anv_cmd_buffer *cmd_buffer,
830                                const struct anv_image *image,
831                                enum isl_format format,
832                                struct isl_swizzle swizzle,
833                                VkImageAspectFlagBits aspect,
834                                uint32_t array_layer,
835                                enum isl_aux_op resolve_op,
836                                enum anv_fast_clear_type fast_clear_supported)
837 {
838    assert(aspect == VK_IMAGE_ASPECT_COLOR_BIT);
839    assert(resolve_op == ISL_AUX_OP_PARTIAL_RESOLVE);
840 
841    anv_cmd_compute_resolve_predicate(cmd_buffer, image,
842                                      aspect, 0, array_layer,
843                                      resolve_op, fast_clear_supported);
844 
845    anv_image_mcs_op(cmd_buffer, image, format, swizzle, aspect,
846                     array_layer, 1, resolve_op, NULL, true);
847 }
848 
849 void
850 genX(cmd_buffer_mark_image_written)(struct anv_cmd_buffer *cmd_buffer,
851                                     const struct anv_image *image,
852                                     VkImageAspectFlagBits aspect,
853                                     enum isl_aux_usage aux_usage,
854                                     uint32_t level,
855                                     uint32_t base_layer,
856                                     uint32_t layer_count)
857 {
858 #if GFX_VER < 20
859    /* The aspect must be exactly one of the image aspects. */
860    assert(util_bitcount(aspect) == 1 && (aspect & image->vk.aspects));
861 
862    /* Filter out aux usages that don't have any compression tracking.
863     * Note: We only have compression tracking for CCS_E images, but it's
864     * possible for a CCS_E enabled image to have a subresource with a different
865     * aux usage.
866     */
867    if (!isl_aux_usage_has_compression(aux_usage))
868       return;
869 
870    set_image_compressed_bit(cmd_buffer, image, aspect,
871                             level, base_layer, layer_count, true);
872 #endif
873 }
874 
875 /* Copy the fast-clear value dword(s) between a surface state object and an
876  * image's fast clear state buffer.
877  */
878 void
879 genX(cmd_buffer_load_clear_color)(struct anv_cmd_buffer *cmd_buffer,
880                                   struct anv_state surface_state,
881                                   const struct anv_image_view *iview)
882 {
883 #if GFX_VER < 10
884    struct anv_address ss_clear_addr =
885       anv_state_pool_state_address(
886          &cmd_buffer->device->internal_surface_state_pool,
887          (struct anv_state) {
888             .offset = surface_state.offset +
889                       cmd_buffer->device->isl_dev.ss.clear_value_offset
890          });
891    const struct anv_address entry_addr =
892       anv_image_get_clear_color_addr(cmd_buffer->device, iview->image,
893                                      iview->planes[0].isl.format,
894                                      VK_IMAGE_ASPECT_COLOR_BIT, false);
895 
896    unsigned copy_size = cmd_buffer->device->isl_dev.ss.clear_value_size;
897 
898    struct mi_builder b;
899    mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
900    mi_builder_set_write_check(&b, true);
901 
902    mi_memcpy(&b, ss_clear_addr, entry_addr, copy_size);
903 
904    /* Updating a surface state object may require that the state cache be
905     * invalidated. From the SKL PRM, Shared Functions -> State -> State
906     * Caching:
907     *
908     *    Whenever the RENDER_SURFACE_STATE object in memory pointed to by
909     *    the Binding Table Pointer (BTP) and Binding Table Index (BTI) is
910     *    modified [...], the L1 state cache must be invalidated to ensure
911     *    the new surface or sampler state is fetched from system memory.
912     *
913     * In testing, SKL doesn't actually seem to need this, but HSW does.
914     */
915    anv_add_pending_pipe_bits(cmd_buffer,
916                              ANV_PIPE_STATE_CACHE_INVALIDATE_BIT,
917                              "after load_clear_color surface state update");
918 #endif
919 }
920 
921 static void
922 set_image_clear_color(struct anv_cmd_buffer *cmd_buffer,
923                       const struct anv_image *image,
924                       const VkImageAspectFlags aspect,
925                       const uint32_t *pixel)
926 {
927    for (int i = 0; i < image->num_view_formats; i++) {
928       union isl_color_value clear_color;
929       isl_color_value_unpack(&clear_color, image->view_formats[i], pixel);
930 
931       UNUSED union isl_color_value sample_color = clear_color;
932       if (isl_format_is_srgb(image->view_formats[i])) {
933          sample_color.f32[0] =
934             util_format_linear_to_srgb_float(clear_color.f32[0]);
935          sample_color.f32[1] =
936             util_format_linear_to_srgb_float(clear_color.f32[1]);
937          sample_color.f32[2] =
938             util_format_linear_to_srgb_float(clear_color.f32[2]);
939       }
940 
941       const struct anv_address addr =
942          anv_image_get_clear_color_addr(cmd_buffer->device, image,
943                                         image->view_formats[i], aspect,
944                                         false);
945       assert(!anv_address_is_null(addr));
946 
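      /* Layout note: in MI_STORE_DATA_IMM, dw[0] is the header and dw[1..2]
       * hold the 64-bit destination address, so the immediate payload
       * written in both branches below starts at dw[3].
       */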
947 #if GFX_VER >= 11
948       assert(cmd_buffer->device->isl_dev.ss.clear_color_state_size == 32);
949       uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 3 + 6,
950                                      GENX(MI_STORE_DATA_IMM),
951                                      .StoreQword = true, .Address = addr);
952       dw[3] = clear_color.u32[0];
953       dw[4] = clear_color.u32[1];
954       dw[5] = clear_color.u32[2];
955       dw[6] = clear_color.u32[3];
956       dw[7] = pixel[0];
957       dw[8] = pixel[1];
958 #else
959       assert(cmd_buffer->device->isl_dev.ss.clear_color_state_size == 0);
960       assert(cmd_buffer->device->isl_dev.ss.clear_value_size == 16);
961       uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 3 + 8,
962                                      GENX(MI_STORE_DATA_IMM),
963                                      .StoreQword = true, .Address = addr);
964       dw[3] = clear_color.u32[0];
965       dw[4] = clear_color.u32[1];
966       dw[5] = clear_color.u32[2];
967       dw[6] = clear_color.u32[3];
968       dw[7]  = sample_color.u32[0];
969       dw[8]  = sample_color.u32[1];
970       dw[9]  = sample_color.u32[2];
971       dw[10] = sample_color.u32[3];
972 #endif
973    }
974 }
975 
976 void
977 genX(set_fast_clear_state)(struct anv_cmd_buffer *cmd_buffer,
978                            const struct anv_image *image,
979                            const enum isl_format format,
980                            const struct isl_swizzle swizzle,
981                            union isl_color_value clear_color)
982 {
983    uint32_t pixel[4] = {};
984    union isl_color_value swiz_color =
985       isl_color_value_swizzle_inv(clear_color, swizzle);
986    isl_color_value_pack(&swiz_color, format, pixel);
987    set_image_clear_color(cmd_buffer, image, VK_IMAGE_ASPECT_COLOR_BIT, pixel);
988 
989    if (isl_color_value_is_zero(clear_color, format)) {
990       /* This image has the auxiliary buffer enabled. We can mark the
991        * subresource as not needing a resolve because the clear color
992        * will match what's in every RENDER_SURFACE_STATE object when
993        * it's being used for sampling.
994        */
995       set_image_fast_clear_state(cmd_buffer, image,
996                                  VK_IMAGE_ASPECT_COLOR_BIT,
997                                  ANV_FAST_CLEAR_DEFAULT_VALUE);
998    } else {
999       set_image_fast_clear_state(cmd_buffer, image,
1000                                  VK_IMAGE_ASPECT_COLOR_BIT,
1001                                  ANV_FAST_CLEAR_ANY);
1002    }
1003 }
1004 
1005 /**
1006  * @brief Transitions a color buffer from one layout to another.
1007  *
1008  * See section 6.1.1. Image Layout Transitions of the Vulkan 1.0.50 spec for
1009  * more information.
1010  *
1011  * @param level_count VK_REMAINING_MIP_LEVELS isn't supported.
1012  * @param layer_count VK_REMAINING_ARRAY_LAYERS isn't supported. For 3D images,
1013  *                    this represents the maximum layers to transition at each
1014  *                    specified miplevel.
1015  */
1016 static void
1017 transition_color_buffer(struct anv_cmd_buffer *cmd_buffer,
1018                         const struct anv_image *image,
1019                         VkImageAspectFlagBits aspect,
1020                         const uint32_t base_level, uint32_t level_count,
1021                         uint32_t base_layer, uint32_t layer_count,
1022                         VkImageLayout initial_layout,
1023                         VkImageLayout final_layout,
1024                         uint32_t src_queue_family,
1025                         uint32_t dst_queue_family,
1026                         bool will_full_fast_clear)
1027 {
1028    struct anv_device *device = cmd_buffer->device;
1029    const struct intel_device_info *devinfo = device->info;
1030    /* Validate the inputs. */
1031    assert(cmd_buffer);
1032    assert(image && image->vk.aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
1033    /* These values aren't supported for simplicity's sake. */
1034    assert(level_count != VK_REMAINING_MIP_LEVELS &&
1035           layer_count != VK_REMAINING_ARRAY_LAYERS);
1036    /* Ensure the subresource range is valid. */
1037    UNUSED uint64_t last_level_num = base_level + level_count;
1038    const uint32_t max_depth = u_minify(image->vk.extent.depth, base_level);
1039    UNUSED const uint32_t image_layers = MAX2(image->vk.array_layers, max_depth);
1040    assert((uint64_t)base_layer + layer_count  <= image_layers);
1041    assert(last_level_num <= image->vk.mip_levels);
1042    /* If there is a layout transition, the final layout cannot be undefined or
1043     * preinitialized (VUID-VkImageMemoryBarrier-newLayout-01198).
1044     */
1045    assert(initial_layout == final_layout ||
1046           (final_layout != VK_IMAGE_LAYOUT_UNDEFINED &&
1047            final_layout != VK_IMAGE_LAYOUT_PREINITIALIZED));
1048    const struct isl_drm_modifier_info *isl_mod_info =
1049       image->vk.tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT
1050       ? isl_drm_modifier_get_info(image->vk.drm_format_mod)
1051       : NULL;
1052 
1053    const bool src_queue_external =
1054       src_queue_family == VK_QUEUE_FAMILY_FOREIGN_EXT ||
1055       src_queue_family == VK_QUEUE_FAMILY_EXTERNAL;
1056 
1057    const bool dst_queue_external =
1058       dst_queue_family == VK_QUEUE_FAMILY_FOREIGN_EXT ||
1059       dst_queue_family == VK_QUEUE_FAMILY_EXTERNAL;
1060 
1061    /* If the queues are external, consider the first queue family flags
1062     * (should be the most capable)
1063     */
1064    const VkQueueFlagBits src_queue_flags =
1065       device->physical->queue.families[
1066          (src_queue_external || src_queue_family == VK_QUEUE_FAMILY_IGNORED) ?
1067          0 : src_queue_family].queueFlags;
1068    const VkQueueFlagBits dst_queue_flags =
1069       device->physical->queue.families[
1070          (dst_queue_external || dst_queue_family == VK_QUEUE_FAMILY_IGNORED) ?
1071          0 : dst_queue_family].queueFlags;
1072 
1073    /* Simultaneous acquire and release on external queues is illegal. */
1074    assert(!src_queue_external || !dst_queue_external);
1075 
1076    /* Ownership transition on an external queue requires special action if the
1077     * image has a DRM format modifier because we store image data in
1078     * a driver-private bo which is inaccessible to the external queue.
1079     */
1080    const bool private_binding_acquire =
1081       src_queue_external &&
1082       anv_image_is_externally_shared(image) &&
1083       anv_image_has_private_binding(image);
1084 
1085    const bool private_binding_release =
1086       dst_queue_external &&
1087       anv_image_is_externally_shared(image) &&
1088       anv_image_has_private_binding(image);
1089 
1090    if (initial_layout == final_layout &&
1091        !private_binding_acquire && !private_binding_release) {
1092       /* No work is needed. */
1093        return;
1094    }
1095 
1096    /**
1097     * Section 7.7.4 of the Vulkan 1.3.260 spec says:
1098     *
1099     *    If the transfer is via an image memory barrier, and an image layout
1100     *    transition is desired, then the values of oldLayout and newLayout in the
1101     *    release operation's memory barrier must be equal to values of oldLayout
1102     *    and newLayout in the acquire operation's memory barrier. Although the
1103     *    image layout transition is submitted twice, it will only be executed
1104     *    once. A layout transition specified in this way happens-after the
1105     *    release operation and happens-before the acquire operation.
1106     *
1107    * Because we know that we get matching transitions on each queue, we choose
1108    * to only do the work on one queue type: RENDER. In the cases where we do
1109    * transitions between COMPUTE & TRANSFER, we should have matching
1110    * aux/fast_clear values which would trigger no work in the code below.
1111     */
1112    if (!(src_queue_external || dst_queue_external) &&
1113        src_queue_family != VK_QUEUE_FAMILY_IGNORED &&
1114        dst_queue_family != VK_QUEUE_FAMILY_IGNORED &&
1115        src_queue_family != dst_queue_family) {
1116       enum intel_engine_class src_engine =
1117          cmd_buffer->queue_family->engine_class;
1118       if (src_engine != INTEL_ENGINE_CLASS_RENDER)
1119          return;
1120    }
1121 
1122    const uint32_t plane = anv_image_aspect_to_plane(image, aspect);
1123 
1124    if (base_layer >= anv_image_aux_layers(image, aspect, base_level))
1125       return;
1126 
1127    enum isl_aux_usage initial_aux_usage =
1128       anv_layout_to_aux_usage(devinfo, image, aspect, 0,
1129                               initial_layout, src_queue_flags);
1130    enum isl_aux_usage final_aux_usage =
1131       anv_layout_to_aux_usage(devinfo, image, aspect, 0,
1132                               final_layout, dst_queue_flags);
1133    enum anv_fast_clear_type initial_fast_clear =
1134       anv_layout_to_fast_clear_type(devinfo, image, aspect, initial_layout,
1135                                     src_queue_flags);
1136    enum anv_fast_clear_type final_fast_clear =
1137       anv_layout_to_fast_clear_type(devinfo, image, aspect, final_layout,
1138                                     dst_queue_flags);
1139 
1140    /* We must override the anv_layout_to_* functions because they are unaware
1141     * of acquire/release direction.
1142     */
1143    if (private_binding_acquire) {
1144       initial_aux_usage = isl_drm_modifier_has_aux(isl_mod_info->modifier) ?
1145          image->planes[plane].aux_usage : ISL_AUX_USAGE_NONE;
1146       initial_fast_clear = isl_mod_info->supports_clear_color ?
1147          initial_fast_clear : ANV_FAST_CLEAR_NONE;
1148    } else if (private_binding_release) {
1149       final_aux_usage = isl_drm_modifier_has_aux(isl_mod_info->modifier) ?
1150          image->planes[plane].aux_usage : ISL_AUX_USAGE_NONE;
1151       final_fast_clear = isl_mod_info->supports_clear_color ?
1152          final_fast_clear : ANV_FAST_CLEAR_NONE;
1153    }
1154 
1155    assert(image->planes[plane].primary_surface.isl.tiling != ISL_TILING_LINEAR);
1156 
1157    /* The following layouts are equivalent for non-linear images. */
1158    const bool initial_layout_undefined =
1159       initial_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
1160       initial_layout == VK_IMAGE_LAYOUT_PREINITIALIZED;
1161 
1162    bool must_init_fast_clear_state = false;
1163    bool must_init_aux_surface = false;
1164 
1165    if (initial_layout_undefined) {
1166       /* The subresource may have been aliased and populated with arbitrary
1167        * data, so we should initialize fast-clear state on platforms prior to
1168        * Xe2. Xe2+ platforms don't need it thanks to the new design of fast-
1169        * clear.
1170        */
1171       must_init_fast_clear_state = devinfo->ver < 20;
1172 
1173       if (isl_aux_usage_has_mcs(image->planes[plane].aux_usage) ||
1174           devinfo->has_illegal_ccs_values) {
1175 
1176          must_init_aux_surface = true;
1177 
1178       } else {
1179          assert(isl_aux_usage_has_ccs_e(image->planes[plane].aux_usage));
1180 
1181          /* We can start using the CCS immediately without ambiguating. The
1182           * two conditions that enable this are:
1183           *
1184           * 1) The device treats all possible CCS values as legal. In other
1185           *    words, we can't confuse the hardware with random bits in the
1186           *    CCS.
1187           *
1188           * 2) We enable compression on all writable image layouts. The CCS
1189           *    will receive all writes and will therefore always be in sync
1190           *    with the main surface.
1191           *
1192           *    If we were to disable compression on some writable layouts, the
1193           *    CCS could get out of sync with the main surface and the app
1194           *    could lose the data it wrote previously. For example, this
1195           *    could happen if an app: transitions from UNDEFINED w/o
1196           *    ambiguating -> renders with AUX_NONE -> samples with AUX_CCS.
1197           *
1198           * The second condition is asserted below, but could be moved
1199           * elsewhere for more coverage (we're only checking transitions from
1200           * an undefined layout).
1201           */
1202          assert(vk_image_layout_is_read_only(final_layout, aspect) ||
1203                 (final_aux_usage != ISL_AUX_USAGE_NONE));
1204 
1205          must_init_aux_surface = false;
1206       }
1207 
1208    } else if (private_binding_acquire) {
1209       /* The fast clear state lives in a driver-private bo, and therefore the
1210        * external/foreign queue is unaware of it.
1211        *
1212        * If this is the first time we are accessing the image, then the fast
1213        * clear state is uninitialized.
1214        *
1215        * If this is NOT the first time we are accessing the image, then the fast
1216        * clear state may still be valid and correct due to the resolve during
1217        * our most recent ownership release.  However, we do not track the aux
1218        * state with MI stores, and therefore must assume the worst-case: that
1219        * this is the first time we are accessing the image.
1220        */
1221       assert(image->planes[plane].fast_clear_memory_range.binding ==
1222               ANV_IMAGE_MEMORY_BINDING_PRIVATE);
1223       must_init_fast_clear_state = true;
1224 
1225       if (anv_image_get_aux_memory_range(image, plane)->binding ==
1226           ANV_IMAGE_MEMORY_BINDING_PRIVATE) {
1227          /* The aux surface, like the fast clear state, lives in
1228           * a driver-private bo.  We must initialize the aux surface for the
1229           * same reasons we must initialize the fast clear state.
1230           */
1231          must_init_aux_surface = true;
1232       } else {
1233          /* The aux surface, unlike the fast clear state, lives in
1234           * application-visible VkDeviceMemory and is shared with the
1235           * external/foreign queue. Therefore, when we acquire ownership of the
1236           * image with a defined VkImageLayout, the aux surface is valid and has
1237           * the aux state required by the modifier.
1238           */
1239          must_init_aux_surface = false;
1240       }
1241    }
1242 
1243    if (must_init_fast_clear_state) {
1244       if (image->planes[plane].aux_usage == ISL_AUX_USAGE_FCV_CCS_E) {
1245          /* Ensure the raw and converted clear colors are in sync. */
1246          const uint32_t zero_pixel[4] = {};
1247          set_image_clear_color(cmd_buffer, image, aspect, zero_pixel);
1248       }
1249       if (base_level == 0 && base_layer == 0) {
1250          set_image_fast_clear_state(cmd_buffer, image, aspect,
1251                                     ANV_FAST_CLEAR_NONE);
1252       }
1253    }
1254 
1255    if (must_init_aux_surface) {
1256       assert(devinfo->ver >= 20 || must_init_fast_clear_state);
1257 
1258       /* Initialize the aux buffers to enable correct rendering.  In order to
1259        * ensure that things such as storage images work correctly, aux buffers
1260        * need to be initialized to valid data.
1261        *
1262        * Having an aux buffer with invalid data is a problem for two reasons:
1263        *
1264        *  1) Having an invalid value in the buffer can confuse the hardware.
1265        *     For instance, with CCS_E on SKL, a two-bit CCS value of 2 is
1266        *     invalid and leads to the hardware doing strange things.  It
1267        *     doesn't hang as far as we can tell but rendering corruption can
1268        *     occur.
1269        *
1270        *  2) If this transition is into the GENERAL layout and we then use the
1271        *     image as a storage image, then we must have the aux buffer in the
1272        *     pass-through state so that, if we then go to texture from the
1273        *     image, we get the results of our storage image writes and not the
1274        *     fast clear color or other random data.
1275        *
1276        * For CCS both of the problems above are real demonstrable issues.  In
1277        * that case, the only thing we can do is to perform an ambiguate to
1278        * transition the aux surface into the pass-through state.
1279        *
1280        * For MCS, (2) is never an issue because we don't support multisampled
1281        * storage images.  In theory, issue (1) is a problem with MCS but we've
1282        * never seen it in the wild.  For 4x and 16x, all bit patterns could,
1283        * in theory, be interpreted as something but we don't know that all bit
1284        * patterns are actually valid.  For 2x and 8x, you could easily end up
1285        * with the MCS referring to an invalid plane because not all bits of
1286        * the MCS value are actually used.  Even though we've never seen issues
1287        * in the wild, it's best to play it safe and initialize the MCS.  We
1288        * could use a fast-clear for MCS because we only ever touch from render
1289        * and texture (no image load store). However, due to WA 14013111325,
1290        * we choose to ambiguate MCS as well.
1291        */
1292       if (image->vk.samples == 1) {
1293          for (uint32_t l = 0; l < level_count; l++) {
1294             const uint32_t level = base_level + l;
1295 
1296             uint32_t aux_layers = anv_image_aux_layers(image, aspect, level);
1297             if (base_layer >= aux_layers)
1298                break; /* We will only get fewer layers as level increases */
1299             uint32_t level_layer_count =
1300                MIN2(layer_count, aux_layers - base_layer);
1301 
1302             /* If will_full_fast_clear is set, the caller promises to
1303              * fast-clear as large a portion of the specified range as it can.
1304              * For color images, that means only the first LOD and array slice.
1305              */
1306             if (level == 0 && base_layer == 0 && will_full_fast_clear) {
1307                base_layer++;
1308                level_layer_count--;
1309                if (level_layer_count == 0)
1310                   continue;
1311             }
1312 
1313             anv_image_ccs_op(cmd_buffer, image,
1314                              image->planes[plane].primary_surface.isl.format,
1315                              ISL_SWIZZLE_IDENTITY,
1316                              aspect, level, base_layer, level_layer_count,
1317                              ISL_AUX_OP_AMBIGUATE, NULL, false);
1318 
1319             set_image_compressed_bit(cmd_buffer, image, aspect, level,
1320                                      base_layer, level_layer_count, false);
1321          }
1322       } else {
1323          /* If will_full_fast_clear is set, the caller promises to fast-clear
1324           * as large a portion of the specified range as it can.
1325           */
1326          if (will_full_fast_clear)
1327             return;
1328 
1329          assert(base_level == 0 && level_count == 1);
1330          anv_image_mcs_op(cmd_buffer, image,
1331                           image->planes[plane].primary_surface.isl.format,
1332                           ISL_SWIZZLE_IDENTITY,
1333                           aspect, base_layer, layer_count,
1334                           ISL_AUX_OP_AMBIGUATE, NULL, false);
1335       }
1336       return;
1337    }
1338 
1339    /* The current code assumes that there is no mixing of CCS_E and CCS_D.
1340     * We can handle transitions of CCS_D/E to and from NONE.  What we
1341     * don't yet handle is switching between CCS_E and CCS_D within a given
1342     * image.  Doing so in a performant way requires more detailed aux state
1343     * tracking such as what is done in i965.  For now, just assume that we
1344     * only have one type of compression.
1345     */
1346    assert(initial_aux_usage == ISL_AUX_USAGE_NONE ||
1347           final_aux_usage == ISL_AUX_USAGE_NONE ||
1348           initial_aux_usage == final_aux_usage);
1349 
1350    /* If initial aux usage is NONE, there is nothing to resolve */
1351    if (initial_aux_usage == ISL_AUX_USAGE_NONE)
1352       return;
1353 
1354    enum isl_aux_op resolve_op = ISL_AUX_OP_NONE;
1355 
1356    /* If the initial layout supports more fast clear than the final layout
1357     * then we need at least a partial resolve.
1358     */
1359    if (final_fast_clear < initial_fast_clear) {
1360       /* Partial resolves will actually only occur on layer 0/level 0. This
1361        * is generally okay because anv only allows explicit fast clears to
1362        * the first subresource.
1363        *
1364        * The situation is a bit different with FCV_CCS_E. With that aux
1365        * usage, implicit fast clears can occur on any layer and level.
1366        * anv doesn't track fast clear states for more than the first
1367        * subresource, so we need to assert that a layout transition doesn't
1368        * attempt to partial resolve the other subresources.
1369        *
1370        * At the moment, we don't enter such a situation, and partial resolves
1371        * for higher level/layer resources shouldn't be a concern.
1372        */
1373       if (image->planes[plane].aux_usage == ISL_AUX_USAGE_FCV_CCS_E) {
1374          assert(base_level == 0 && level_count == 1 &&
1375                 base_layer == 0 && layer_count == 1);
1376       }
1377       resolve_op = ISL_AUX_OP_PARTIAL_RESOLVE;
1378    }
1379 
1380    if (isl_aux_usage_has_ccs_e(initial_aux_usage) &&
1381        !isl_aux_usage_has_ccs_e(final_aux_usage))
1382       resolve_op = ISL_AUX_OP_FULL_RESOLVE;
1383 
1384    if (resolve_op == ISL_AUX_OP_NONE)
1385       return;
1386 
1387    for (uint32_t l = 0; l < level_count; l++) {
1388       uint32_t level = base_level + l;
1389 
1390       uint32_t aux_layers = anv_image_aux_layers(image, aspect, level);
1391       if (base_layer >= aux_layers)
1392          break; /* We will only get fewer layers as level increases */
1393       uint32_t level_layer_count =
1394          MIN2(layer_count, aux_layers - base_layer);
1395 
1396       for (uint32_t a = 0; a < level_layer_count; a++) {
1397          uint32_t array_layer = base_layer + a;
1398 
1399          /* If will_full_fast_clear is set, the caller promises to fast-clear
1400           * as large a portion of the specified range as it can.  For color
1401           * images, that means only the first LOD and array slice.
1402           */
1403          if (level == 0 && array_layer == 0 && will_full_fast_clear)
1404             continue;
1405 
1406          if (image->vk.samples == 1) {
1407             anv_cmd_predicated_ccs_resolve(cmd_buffer, image,
1408                                            image->planes[plane].primary_surface.isl.format,
1409                                            ISL_SWIZZLE_IDENTITY,
1410                                            aspect, level, array_layer, resolve_op,
1411                                            final_fast_clear);
1412          } else {
1413             /* We only support fast-clear on the first layer, so partial
1414              * resolves should not be used on other layers; they would use
1415              * the clear color stored in memory, which is only valid for layer 0.
1416              */
1417             if (resolve_op == ISL_AUX_OP_PARTIAL_RESOLVE &&
1418                 array_layer != 0)
1419                continue;
1420 
1421             anv_cmd_predicated_mcs_resolve(cmd_buffer, image,
1422                                            image->planes[plane].primary_surface.isl.format,
1423                                            ISL_SWIZZLE_IDENTITY,
1424                                            aspect, array_layer, resolve_op,
1425                                            final_fast_clear);
1426          }
1427       }
1428    }
1429 }
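
/* Illustrative sketch (not part of the driver): the resolve decision made
 * above, restated with simplified, hypothetical types.  A layout transition
 * needs at least a partial resolve when the destination layout supports less
 * fast-clear than the source, and a full resolve when CCS_E compression is
 * being dropped.  Kept under #if 0 so it is never compiled.
 */
#if 0
enum example_fast_clear {
   EXAMPLE_FAST_CLEAR_NONE,
   EXAMPLE_FAST_CLEAR_DEFAULT,
   EXAMPLE_FAST_CLEAR_ANY,
};

enum example_resolve_op {
   EXAMPLE_RESOLVE_NONE,
   EXAMPLE_RESOLVE_PARTIAL,
   EXAMPLE_RESOLVE_FULL,
};

static enum example_resolve_op
example_pick_resolve_op(enum example_fast_clear initial_fc,
                        enum example_fast_clear final_fc,
                        bool initial_has_ccs_e, bool final_has_ccs_e)
{
   enum example_resolve_op op = EXAMPLE_RESOLVE_NONE;

   /* Losing fast-clear support requires at least a partial resolve so the
    * clear color gets written into the main surface.
    */
   if (final_fc < initial_fc)
      op = EXAMPLE_RESOLVE_PARTIAL;

   /* Losing CCS_E compression requires a full resolve so the main surface
    * holds all of the data on its own.
    */
   if (initial_has_ccs_e && !final_has_ccs_e)
      op = EXAMPLE_RESOLVE_FULL;

   return op;
}
#endif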
1430 
1431 static MUST_CHECK VkResult
1432 anv_cmd_buffer_init_attachments(struct anv_cmd_buffer *cmd_buffer,
1433                                 uint32_t color_att_count)
1434 {
1435    struct anv_cmd_graphics_state *gfx = &cmd_buffer->state.gfx;
1436 
1437    /* Reserve one for the NULL state. */
1438    unsigned num_states = 1 + color_att_count;
1439    const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
1440    const uint32_t ss_stride = align(isl_dev->ss.size, isl_dev->ss.align);
1441    gfx->att_states =
1442       anv_cmd_buffer_alloc_surface_states(cmd_buffer, num_states);
1443    if (gfx->att_states.map == NULL)
1444       return VK_ERROR_OUT_OF_DEVICE_MEMORY;
1445 
1446    struct anv_state next_state = gfx->att_states;
1447    next_state.alloc_size = isl_dev->ss.size;
1448 
1449    gfx->null_surface_state = next_state;
1450    next_state.offset += ss_stride;
1451    next_state.map += ss_stride;
1452 
1453    gfx->color_att_count = color_att_count;
1454    for (uint32_t i = 0; i < color_att_count; i++) {
1455       gfx->color_att[i] = (struct anv_attachment) {
1456          .surface_state.state = next_state,
1457       };
1458       next_state.offset += ss_stride;
1459       next_state.map += ss_stride;
1460    }
1461    gfx->depth_att = (struct anv_attachment) { };
1462    gfx->stencil_att = (struct anv_attachment) { };
1463 
1464    return VK_SUCCESS;
1465 }
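
/* Illustrative sketch (not part of the driver): the suballocation pattern
 * used above.  One contiguous surface-state allocation is carved into
 * per-attachment slices whose stride is the state size rounded up to its
 * alignment (the same align() helper used above).  The struct and function
 * names here are hypothetical.  Kept under #if 0 so it is never compiled.
 */
#if 0
struct example_state {
   uint32_t offset;
   char *map;
};

static void
example_carve_states(struct example_state base, uint32_t count,
                     uint32_t ss_size, uint32_t ss_align,
                     struct example_state *out)
{
   const uint32_t stride = align(ss_size, ss_align);

   for (uint32_t i = 0; i < count; i++) {
      out[i] = base;           /* state i aliases one slice of the block */
      base.offset += stride;   /* advance by one aligned surface state */
      base.map += stride;
   }
}
#endif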
1466 
1467 static void
1468 anv_cmd_buffer_reset_rendering(struct anv_cmd_buffer *cmd_buffer)
1469 {
1470    struct anv_cmd_graphics_state *gfx = &cmd_buffer->state.gfx;
1471 
1472    gfx->render_area = (VkRect2D) { };
1473    gfx->layer_count = 0;
1474    gfx->samples = 0;
1475 
1476    gfx->color_att_count = 0;
1477    gfx->depth_att = (struct anv_attachment) { };
1478    gfx->stencil_att = (struct anv_attachment) { };
1479    gfx->null_surface_state = ANV_STATE_NULL;
1480 }
1481 
1482 /**
1483  * Program the hardware to use the specified L3 configuration.
1484  */
1485 void
1486 genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer,
1487                            const struct intel_l3_config *cfg)
1488 {
1489    assert(cfg || GFX_VER >= 12);
1490    if (cfg == cmd_buffer->state.current_l3_config)
1491       return;
1492 
1493 #if GFX_VER >= 11
1494    /* On Gfx11+ we use only one config, so just verify it hasn't changed and
1495     * skip the stall-and-reprogram sequence entirely.
1496     */
1497    assert(cfg == cmd_buffer->device->l3_config);
1498 #else
1499    if (INTEL_DEBUG(DEBUG_L3)) {
1500       mesa_logd("L3 config transition: ");
1501       intel_dump_l3_config(cfg, stderr);
1502    }
1503 
1504    /* According to the hardware docs, the L3 partitioning can only be changed
1505     * while the pipeline is completely drained and the caches are flushed,
1506     * which involves a first PIPE_CONTROL flush which stalls the pipeline...
1507     */
1508    genx_batch_emit_pipe_control(&cmd_buffer->batch, cmd_buffer->device->info,
1509                                 cmd_buffer->state.current_pipeline,
1510                                 ANV_PIPE_DATA_CACHE_FLUSH_BIT |
1511                                 ANV_PIPE_CS_STALL_BIT);
1512 
1513    /* ...followed by a second pipelined PIPE_CONTROL that initiates
1514     * invalidation of the relevant caches.  Note that because RO invalidation
1515     * happens at the top of the pipeline (i.e. right away as the PIPE_CONTROL
1516     * command is processed by the CS) we cannot combine it with the previous
1517     * stalling flush as the hardware documentation suggests, because that
1518     * would cause the CS to stall on previous rendering *after* RO
1519     * invalidation and wouldn't prevent the RO caches from being polluted by
1520     * concurrent rendering before the stall completes.  This intentionally
1521     * doesn't implement the SKL+ hardware workaround suggesting to enable CS
1522     * stall on PIPE_CONTROLs with the texture cache invalidation bit set for
1523     * GPGPU workloads because the previous and subsequent PIPE_CONTROLs
1524     * already guarantee that there is no concurrent GPGPU kernel execution
1525     * (see SKL HSD 2132585).
1526     */
1527    genx_batch_emit_pipe_control(&cmd_buffer->batch, cmd_buffer->device->info,
1528                                 cmd_buffer->state.current_pipeline,
1529                                 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
1530                                 ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT |
1531                                 ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT |
1532                                 ANV_PIPE_STATE_CACHE_INVALIDATE_BIT);
1533 
1534    /* Now send a third stalling flush to make sure that invalidation is
1535     * complete when the L3 configuration registers are modified.
1536     */
1537    genx_batch_emit_pipe_control(&cmd_buffer->batch, cmd_buffer->device->info,
1538                                 cmd_buffer->state.current_pipeline,
1539                                 ANV_PIPE_DATA_CACHE_FLUSH_BIT |
1540                                 ANV_PIPE_CS_STALL_BIT);
1541 
1542    genX(emit_l3_config)(&cmd_buffer->batch, cmd_buffer->device, cfg);
1543 #endif /* GFX_VER >= 11 */
1544    cmd_buffer->state.current_l3_config = cfg;
1545 }
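
/* Illustrative sketch (not part of the driver): the reprogramming protocol
 * followed above, expressed with hypothetical helper callbacks.  The L3
 * partitioning may only change while the pipeline is drained, so the order
 * is stall+flush, then invalidate the read-only caches, then a second
 * stall+flush before the registers are touched.  Kept under #if 0.
 */
#if 0
static void
example_reprogram_l3(void (*stall_and_flush)(void),
                     void (*invalidate_ro_caches)(void),
                     void (*write_l3_registers)(void))
{
   stall_and_flush();        /* drain the pipeline and flush RW caches */
   invalidate_ro_caches();   /* pipelined invalidation of RO caches */
   stall_and_flush();        /* make sure the invalidation has completed */
   write_l3_registers();     /* now it is safe to change the partitioning */
}
#endif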
1546 
1547 ALWAYS_INLINE void
1548 genX(invalidate_aux_map)(struct anv_batch *batch,
1549                          struct anv_device *device,
1550                          enum intel_engine_class engine_class,
1551                          enum anv_pipe_bits bits)
1552 {
1553 #if GFX_VER == 12
1554    if ((bits & ANV_PIPE_AUX_TABLE_INVALIDATE_BIT) && device->info->has_aux_map) {
1555       uint32_t register_addr = 0;
1556       switch (engine_class) {
1557       case INTEL_ENGINE_CLASS_COMPUTE:
1558          register_addr = GENX(COMPCS0_CCS_AUX_INV_num);
1559          break;
1560       case INTEL_ENGINE_CLASS_COPY:
1561 #if GFX_VERx10 >= 125
1562          register_addr = GENX(BCS_CCS_AUX_INV_num);
1563 #endif
1564          break;
1565       case INTEL_ENGINE_CLASS_VIDEO:
1566          register_addr = GENX(VD0_CCS_AUX_INV_num);
1567          break;
1568       case INTEL_ENGINE_CLASS_RENDER:
1569       default:
1570          register_addr = GENX(GFX_CCS_AUX_INV_num);
1571          break;
1572       }
1573 
1574       anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
1575          lri.RegisterOffset = register_addr;
1576          lri.DataDWord = 1;
1577       }
1578 
1579       /* Wa_16018063123 - emit fast color dummy blit before MI_FLUSH_DW. */
1580       if (intel_needs_workaround(device->info, 16018063123) &&
1581           engine_class == INTEL_ENGINE_CLASS_COPY) {
1582          genX(batch_emit_fast_color_dummy_blit)(batch, device);
1583       }
1584 
1585       /* HSD 22012751911: SW Programming sequence when issuing aux invalidation:
1586        *
1587        *    "Poll Aux Invalidation bit once the invalidation is set
1588        *     (Register 4208 bit 0)"
1589        */
1590       anv_batch_emit(batch, GENX(MI_SEMAPHORE_WAIT), sem) {
1591          sem.CompareOperation = COMPARE_SAD_EQUAL_SDD;
1592          sem.WaitMode = PollingMode;
1593          sem.RegisterPollMode = true;
1594          sem.SemaphoreDataDword = 0x0;
1595          sem.SemaphoreAddress =
1596             anv_address_from_u64(register_addr);
1597       }
1598    }
1599 #else
1600    assert(!device->info->has_aux_map);
1601 #endif
1602 }
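
/* Illustrative sketch (not part of the driver): roughly the CPU-side
 * analogue of the GPU command sequence above.  Writing 1 to the engine's
 * AUX_INV register kicks off the aux-table invalidation, and the same
 * register's bit 0 is then polled until the hardware clears it.  The
 * register pointer is hypothetical.  Kept under #if 0.
 */
#if 0
static void
example_invalidate_and_wait(volatile uint32_t *aux_inv_reg)
{
   *aux_inv_reg = 1;            /* trigger the aux-table invalidation */

   while (*aux_inv_reg & 1)     /* poll until the hardware clears bit 0 */
      ;                         /* spin here; the real code waits on the GPU */
}
#endif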
1603 
1604 ALWAYS_INLINE enum anv_pipe_bits
1605 genX(emit_apply_pipe_flushes)(struct anv_batch *batch,
1606                               struct anv_device *device,
1607                               uint32_t current_pipeline,
1608                               enum anv_pipe_bits bits,
1609                               enum anv_pipe_bits *emitted_flush_bits)
1610 {
1611 #if GFX_VER >= 12
1612    /* From the TGL PRM, Volume 2a, "PIPE_CONTROL":
1613     *
1614     *     "SW must follow below programming restrictions when programming
1615     *      PIPE_CONTROL command [for ComputeCS]:
1616     *      ...
1617     *      Following bits must not be set when programmed for ComputeCS:
1618     *      - "Render Target Cache Flush Enable", "Depth Cache Flush Enable"
1619     *         and "Tile Cache Flush Enable"
1620     *      - "Depth Stall Enable", Stall at Pixel Scoreboard and
1621     *         "PSD Sync Enable".
1622     *      - "OVR Tile 0 Flush", "TBIMR Force Batch Closure",
1623     *         "AMFS Flush Enable", "VF Cache Invalidation Enable" and
1624     *         "Global Snapshot Count Reset"."
1625     *
1626     * XXX: According to the spec this should not be a concern for a regular
1627     * RCS in GPGPU mode, but during testing it was found that at least the
1628     * "VF Cache Invalidation Enable" bit is ignored in that case.
1629     * This can cause us to miss some important invalidations
1630     * (e.g. from CmdPipelineBarriers) and end up with incoherent data.
1631     *
1632     * There is also Wa_1606932921, "RCS is not waking up fixed function clock
1633     * when specific 3d related bits are programmed in pipecontrol in
1634     * compute mode", which suggests we not use "RT Cache Flush" in GPGPU mode.
1635     *
1636     * The other bits are not confirmed to cause problems, but they are
1637     * included here just to be safe: they're not really relevant in GPGPU
1638     * mode, and hiding them doesn't seem to cause any regressions.
1639     *
1640     * So if we're currently in GPGPU mode, we hide some bits from this
1641     * flush and emit them only once we're able to.  The same applies to
1642     * GPGPU-only bits while in 3D mode.
1643     */
1644    enum anv_pipe_bits defer_bits = bits &
1645       (current_pipeline == GPGPU ? ANV_PIPE_GFX_BITS: ANV_PIPE_GPGPU_BITS);
1646 
1647    bits &= ~defer_bits;
1648 #endif
1649 
1650    /*
1651     * From Sandybridge PRM, volume 2, "1.7.2 End-of-Pipe Synchronization":
1652     *
1653     *    Write synchronization is a special case of end-of-pipe
1654     *    synchronization that requires that the render cache and/or depth
1655     *    related caches are flushed to memory, where the data will become
1656     *    globally visible. This type of synchronization is required prior to
1657     *    SW (CPU) actually reading the result data from memory, or initiating
1658     *    an operation that will use as a read surface (such as a texture
1659     *    surface) a previous render target and/or depth/stencil buffer
1660     *
1661     *
1662     * From Haswell PRM, volume 2, part 1, "End-of-Pipe Synchronization":
1663     *
1664     *    Exercising the write cache flush bits (Render Target Cache Flush
1665     *    Enable, Depth Cache Flush Enable, DC Flush) in PIPE_CONTROL only
1666     *    ensures the write caches are flushed and doesn't guarantee the data
1667     *    is globally visible.
1668     *
1669     *    SW can track the completion of the end-of-pipe-synchronization by
1670     *    using "Notify Enable" and "PostSync Operation - Write Immediate
1671     *    Data" in the PIPE_CONTROL command.
1672     *
1673     * In other words, flushes are pipelined while invalidations are handled
1674     * immediately.  Therefore, if we're flushing anything then we need to
1675     * schedule an end-of-pipe sync before any invalidations can happen.
1676     */
1677    if (bits & ANV_PIPE_FLUSH_BITS)
1678       bits |= ANV_PIPE_NEEDS_END_OF_PIPE_SYNC_BIT;
1679 
1680    /* From Bspec 43904 (Register_CCSAuxiliaryTableInvalidate):
1681     * RCS engine idle sequence:
1682     *
1683     *    Gfx12+:
1684     *       PIPE_CONTROL:- DC Flush + L3 Fabric Flush + CS Stall + Render
1685     *                      Target Cache Flush + Depth Cache
1686     *
1687     *    Gfx125+:
1688     *       PIPE_CONTROL:- DC Flush + L3 Fabric Flush + CS Stall + Render
1689     *                      Target Cache Flush + Depth Cache + CCS flush
1690     *
1691     * Compute engine idle sequence:
1692     *
1693     *    Gfx12+:
1694     *       PIPE_CONTROL:- DC Flush + L3 Fabric Flush + CS Stall
1695     *
1696     *    Gfx125+:
1697     *       PIPE_CONTROL:- DC Flush + L3 Fabric Flush + CS Stall + CCS flush
1698     */
1699    if (GFX_VER == 12 && (bits & ANV_PIPE_AUX_TABLE_INVALIDATE_BIT)) {
1700       if (current_pipeline == GPGPU) {
1701          bits |=  (ANV_PIPE_DATA_CACHE_FLUSH_BIT |
1702                    ANV_PIPE_L3_FABRIC_FLUSH_BIT |
1703                    ANV_PIPE_CS_STALL_BIT |
1704                    (GFX_VERx10 == 125 ? ANV_PIPE_CCS_CACHE_FLUSH_BIT: 0));
1705       } else if (current_pipeline == _3D) {
1706          bits |= (ANV_PIPE_DATA_CACHE_FLUSH_BIT |
1707                   ANV_PIPE_L3_FABRIC_FLUSH_BIT |
1708                   ANV_PIPE_CS_STALL_BIT |
1709                   ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
1710                   ANV_PIPE_DEPTH_CACHE_FLUSH_BIT |
1711                   (GFX_VERx10 == 125 ? ANV_PIPE_CCS_CACHE_FLUSH_BIT: 0));
1712       }
1713    }
1714 
1715    /* If we're going to do an invalidate and we have a pending end-of-pipe
1716     * sync that has yet to be resolved, we do the end-of-pipe sync now.
1717     */
1718    if ((bits & ANV_PIPE_INVALIDATE_BITS) &&
1719        (bits & ANV_PIPE_NEEDS_END_OF_PIPE_SYNC_BIT)) {
1720       bits |= ANV_PIPE_END_OF_PIPE_SYNC_BIT;
1721       bits &= ~ANV_PIPE_NEEDS_END_OF_PIPE_SYNC_BIT;
1722 
1723       if (INTEL_DEBUG(DEBUG_PIPE_CONTROL) && bits) {
1724          fputs("acc: add ", stdout);
1725          anv_dump_pipe_bits(ANV_PIPE_END_OF_PIPE_SYNC_BIT, stdout);
1726          fprintf(stdout, "reason: Ensure flushes done before invalidate\n");
1727       }
1728    }
1729 
1730    /* Project: SKL / Argument: LRI Post Sync Operation [23]
1731     *
1732     * "PIPECONTROL command with “Command Streamer Stall Enable” must be
1733     *  programmed prior to programming a PIPECONTROL command with "LRI
1734     *  Post Sync Operation" in GPGPU mode of operation (i.e when
1735     *  PIPELINE_SELECT command is set to GPGPU mode of operation)."
1736     *
1737     * The same text exists a few rows below for Post Sync Op.
1738     */
1739    if (bits & ANV_PIPE_POST_SYNC_BIT) {
1740       if (GFX_VER == 9 && current_pipeline == GPGPU)
1741          bits |= ANV_PIPE_CS_STALL_BIT;
1742       bits &= ~ANV_PIPE_POST_SYNC_BIT;
1743    }
1744 
1745    if (bits & (ANV_PIPE_FLUSH_BITS | ANV_PIPE_STALL_BITS |
1746                ANV_PIPE_END_OF_PIPE_SYNC_BIT)) {
1747       enum anv_pipe_bits flush_bits =
1748          bits & (ANV_PIPE_FLUSH_BITS | ANV_PIPE_STALL_BITS |
1749                  ANV_PIPE_END_OF_PIPE_SYNC_BIT);
1750 
1751       uint32_t sync_op = NoWrite;
1752       struct anv_address addr = ANV_NULL_ADDRESS;
1753 
1754       /* From Sandybridge PRM, volume 2, "1.7.3.1 Writing a Value to Memory":
1755        *
1756        *    "The most common action to perform upon reaching a
1757        *    synchronization point is to write a value out to memory. An
1758        *    immediate value (included with the synchronization command) may
1759        *    be written."
1760        *
1761        *
1762        * From Broadwell PRM, volume 7, "End-of-Pipe Synchronization":
1763        *
1764        *    "In case the data flushed out by the render engine is to be
1765        *    read back in to the render engine in coherent manner, then the
1766        *    render engine has to wait for the fence completion before
1767        *    accessing the flushed data. This can be achieved by following
1768        *    means on various products: PIPE_CONTROL command with CS Stall
1769        *    and the required write caches flushed with Post-Sync-Operation
1770        *    as Write Immediate Data.
1771        *
1772        *    Example:
1773        *       - Workload-1 (3D/GPGPU/MEDIA)
1774        *       - PIPE_CONTROL (CS Stall, Post-Sync-Operation Write
1775        *         Immediate Data, Required Write Cache Flush bits set)
1776        *       - Workload-2 (Can use the data produce or output by
1777        *         Workload-1)
1778        */
1779       if (flush_bits & ANV_PIPE_END_OF_PIPE_SYNC_BIT) {
1780          flush_bits |= ANV_PIPE_CS_STALL_BIT;
1781          sync_op = WriteImmediateData;
1782          addr = device->workaround_address;
1783       }
1784 
1785       /* Flush PC. */
1786       genx_batch_emit_pipe_control_write(batch, device->info, current_pipeline,
1787                                          sync_op, addr, 0, flush_bits);
1788 
1789       /* If the caller wants to know what flushes have been emitted,
1790        * provide them based on the bits the PIPE_CONTROL programmed.
1791        */
1792       if (emitted_flush_bits != NULL)
1793          *emitted_flush_bits = flush_bits;
1794 
1795       bits &= ~(ANV_PIPE_FLUSH_BITS | ANV_PIPE_STALL_BITS |
1796                 ANV_PIPE_END_OF_PIPE_SYNC_BIT);
1797    }
1798 
1799    if (bits & ANV_PIPE_INVALIDATE_BITS) {
1800       uint32_t sync_op = NoWrite;
1801       struct anv_address addr = ANV_NULL_ADDRESS;
1802 
1803       /* From the SKL PRM, Vol. 2a, "PIPE_CONTROL",
1804        *
1805        *    "When VF Cache Invalidate is set “Post Sync Operation” must be
1806        *    enabled to “Write Immediate Data” or “Write PS Depth Count” or
1807        *    “Write Timestamp”.
1808        */
1809       if (GFX_VER == 9 && (bits & ANV_PIPE_VF_CACHE_INVALIDATE_BIT)) {
1810          sync_op = WriteImmediateData;
1811          addr = device->workaround_address;
1812       }
1813 
1814       /* Invalidate PC. */
1815       genx_batch_emit_pipe_control_write(batch, device->info, current_pipeline,
1816                                          sync_op, addr, 0, bits);
1817 
1818       enum intel_engine_class engine_class =
1819          current_pipeline == GPGPU ? INTEL_ENGINE_CLASS_COMPUTE :
1820                                      INTEL_ENGINE_CLASS_RENDER;
1821       genX(invalidate_aux_map)(batch, device, engine_class, bits);
1822 
1823       bits &= ~ANV_PIPE_INVALIDATE_BITS;
1824    }
1825 
1826 #if GFX_VER >= 12
1827    bits |= defer_bits;
1828 #endif
1829 
1830    return bits;
1831 }
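
/* Illustrative sketch (not part of the driver): the ordering rule applied
 * above, using plain bitmask arithmetic with hypothetical flag values.
 * Flushes are pipelined while invalidations take effect immediately, so an
 * end-of-pipe sync has to separate them.  Kept under #if 0.
 */
#if 0
enum {
   EXAMPLE_FLUSH_BITS      = 0x00ff,
   EXAMPLE_INVALIDATE_BITS = 0xff00,
   EXAMPLE_NEEDS_EOP_SYNC  = 1u << 16,
   EXAMPLE_EOP_SYNC        = 1u << 17,
};

static uint32_t
example_order_flushes(uint32_t bits)
{
   /* Any flush means an end-of-pipe sync will eventually be needed. */
   if (bits & EXAMPLE_FLUSH_BITS)
      bits |= EXAMPLE_NEEDS_EOP_SYNC;

   /* If an invalidation is also requested, resolve the pending sync now so
    * the invalidation cannot overtake the flush.
    */
   if ((bits & EXAMPLE_INVALIDATE_BITS) && (bits & EXAMPLE_NEEDS_EOP_SYNC)) {
      bits |= EXAMPLE_EOP_SYNC;
      bits &= ~EXAMPLE_NEEDS_EOP_SYNC;
   }

   return bits;
}
#endif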
1832 
1833 ALWAYS_INLINE void
1834 genX(cmd_buffer_apply_pipe_flushes)(struct anv_cmd_buffer *cmd_buffer)
1835 {
1836 #if INTEL_NEEDS_WA_1508744258
1837    /* If we're changing the state of the RHWO optimization, we need to have
1838     * sb_stall+cs_stall.
1839     */
1840    const bool rhwo_opt_change =
1841       cmd_buffer->state.rhwo_optimization_enabled !=
1842       cmd_buffer->state.pending_rhwo_optimization_enabled;
1843    if (rhwo_opt_change) {
1844       anv_add_pending_pipe_bits(cmd_buffer,
1845                                 ANV_PIPE_STALL_AT_SCOREBOARD_BIT |
1846                                 ANV_PIPE_END_OF_PIPE_SYNC_BIT,
1847                                 "change RHWO optimization");
1848    }
1849 #endif
1850 
1851    enum anv_pipe_bits bits = cmd_buffer->state.pending_pipe_bits;
1852 
1853    if (unlikely(cmd_buffer->device->physical->always_flush_cache))
1854       bits |= ANV_PIPE_FLUSH_BITS | ANV_PIPE_INVALIDATE_BITS;
1855    else if (bits == 0)
1856       return;
1857 
1858    if (anv_cmd_buffer_is_blitter_queue(cmd_buffer) ||
1859        anv_cmd_buffer_is_video_queue(cmd_buffer)) {
1860       if (bits & ANV_PIPE_INVALIDATE_BITS) {
1861          genX(invalidate_aux_map)(&cmd_buffer->batch, cmd_buffer->device,
1862                                   cmd_buffer->queue_family->engine_class, bits);
1863          bits &= ~ANV_PIPE_INVALIDATE_BITS;
1864       }
1865       cmd_buffer->state.pending_pipe_bits = bits;
1866       return;
1867    }
1868 
1869    if (GFX_VER == 9 &&
1870        (bits & ANV_PIPE_CS_STALL_BIT) &&
1871        (bits & ANV_PIPE_VF_CACHE_INVALIDATE_BIT)) {
1872       /* If we are doing a VF cache invalidate AND a CS stall (it must be
1873        * both) then we can reset our vertex cache tracking.
1874        */
1875       memset(cmd_buffer->state.gfx.vb_dirty_ranges, 0,
1876              sizeof(cmd_buffer->state.gfx.vb_dirty_ranges));
1877       memset(&cmd_buffer->state.gfx.ib_dirty_range, 0,
1878              sizeof(cmd_buffer->state.gfx.ib_dirty_range));
1879    }
1880 
1881    enum anv_pipe_bits emitted_bits = 0;
1882    cmd_buffer->state.pending_pipe_bits =
1883       genX(emit_apply_pipe_flushes)(&cmd_buffer->batch,
1884                                     cmd_buffer->device,
1885                                     cmd_buffer->state.current_pipeline,
1886                                     bits,
1887                                     &emitted_bits);
1888    anv_cmd_buffer_update_pending_query_bits(cmd_buffer, emitted_bits);
1889 
1890 #if INTEL_NEEDS_WA_1508744258
1891    if (rhwo_opt_change) {
1892       anv_batch_write_reg(&cmd_buffer->batch, GENX(COMMON_SLICE_CHICKEN1), c1) {
1893          c1.RCCRHWOOptimizationDisable =
1894             !cmd_buffer->state.pending_rhwo_optimization_enabled;
1895          c1.RCCRHWOOptimizationDisableMask = true;
1896       }
1897       cmd_buffer->state.rhwo_optimization_enabled =
1898          cmd_buffer->state.pending_rhwo_optimization_enabled;
1899    }
1900 #endif
1901 
1902 }
1903 
1904 static inline struct anv_state
1905 emit_dynamic_buffer_binding_table_entry(struct anv_cmd_buffer *cmd_buffer,
1906                                         struct anv_cmd_pipeline_state *pipe_state,
1907                                         struct anv_pipeline_binding *binding,
1908                                         const struct anv_descriptor *desc)
1909 {
1910    if (!desc->buffer)
1911       return anv_null_surface_state_for_binding_table(cmd_buffer->device);
1912 
1913    /* Compute the offset within the buffer */
1914    uint32_t dynamic_offset =
1915       pipe_state->dynamic_offsets[
1916          binding->set].offsets[binding->dynamic_offset_index];
1917    uint64_t offset = desc->offset + dynamic_offset;
1918    /* Clamp to the buffer size */
1919    offset = MIN2(offset, desc->buffer->vk.size);
1920    /* Clamp the range to the buffer size */
1921    uint32_t range = MIN2(desc->range, desc->buffer->vk.size - offset);
1922 
1923    /* Align the range to the reported bounds checking alignment
1924     * VkPhysicalDeviceRobustness2PropertiesEXT::robustUniformBufferAccessSizeAlignment
1925     */
1926    if (desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
1927       range = align(range, ANV_UBO_ALIGNMENT);
1928 
1929    struct anv_address address =
1930       anv_address_add(desc->buffer->address, offset);
1931 
1932    struct anv_state surface_state =
1933       anv_cmd_buffer_alloc_surface_states(cmd_buffer, 1);
1934    if (surface_state.map == NULL)
1935       return ANV_STATE_NULL;
1936 
1937    enum isl_format format =
1938       anv_isl_format_for_descriptor_type(cmd_buffer->device,
1939                                          desc->type);
1940 
1941    isl_surf_usage_flags_t usage =
1942       anv_isl_usage_for_descriptor_type(desc->type);
1943 
1944    anv_fill_buffer_surface_state(cmd_buffer->device,
1945                                  surface_state.map,
1946                                  format, ISL_SWIZZLE_IDENTITY,
1947                                  usage, address, range, 1);
1948 
1949    return surface_state;
1950 }
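
/* Illustrative sketch (not part of the driver): the clamping above with
 * concrete numbers.  With a 4096-byte buffer, a descriptor offset of 3840, a
 * dynamic offset of 512 and a requested range of 1024, the offset clamps to
 * 4096 and the usable range collapses to 0, so an out-of-bounds dynamic
 * offset degrades to an empty (robust) binding instead of reading past the
 * end of the buffer.  The UBO range alignment step is omitted here; MIN2 is
 * the same helper used above.  Kept under #if 0.
 */
#if 0
static uint32_t
example_clamp_range(uint64_t buffer_size, uint64_t desc_offset,
                    uint32_t dynamic_offset, uint32_t desc_range)
{
   uint64_t offset = desc_offset + dynamic_offset;   /* 3840 + 512 = 4352 */
   offset = MIN2(offset, buffer_size);               /* clamped to 4096   */
   return MIN2(desc_range, buffer_size - offset);    /* 1024 -> 0         */
}
#endif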
1951 
1952 static uint32_t
1953 emit_indirect_descriptor_binding_table_entry(struct anv_cmd_buffer *cmd_buffer,
1954                                              struct anv_cmd_pipeline_state *pipe_state,
1955                                              struct anv_pipeline_binding *binding,
1956                                              const struct anv_descriptor *desc)
1957 {
1958    struct anv_device *device = cmd_buffer->device;
1959    struct anv_state surface_state;
1960 
1961    /* Relative offset in the STATE_BASE_ADDRESS::SurfaceStateBaseAddress heap.
1962     * Depending on where the descriptor surface state is allocated, it can
1963     * come either from device->internal_surface_state_pool or
1964     * device->bindless_surface_state_pool.
1965     */
1966    switch (desc->type) {
1967    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1968    case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1969    case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: {
1970       if (desc->image_view) {
1971          const struct anv_surface_state *sstate =
1972             anv_image_view_texture_surface_state(desc->image_view,
1973                                                  binding->plane,
1974                                                  desc->layout);
1975          surface_state = desc->image_view->use_surface_state_stream ?
1976             sstate->state :
1977             anv_bindless_state_for_binding_table(device, sstate->state);
1978          assert(surface_state.alloc_size);
1979       } else {
1980          surface_state = anv_null_surface_state_for_binding_table(device);
1981       }
1982       break;
1983    }
1984 
1985    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
1986       if (desc->image_view) {
1987          const struct anv_surface_state *sstate =
1988             anv_image_view_storage_surface_state(desc->image_view);
1989          surface_state = desc->image_view->use_surface_state_stream ?
1990             sstate->state :
1991             anv_bindless_state_for_binding_table(device, sstate->state);
1992          assert(surface_state.alloc_size);
1993       } else {
1994          surface_state =
1995             anv_null_surface_state_for_binding_table(device);
1996       }
1997       break;
1998    }
1999 
2000    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2001    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2002       if (desc->set_buffer_view) {
2003          surface_state = desc->set_buffer_view->general.state;
2004          assert(surface_state.alloc_size);
2005       } else {
2006          surface_state = anv_null_surface_state_for_binding_table(device);
2007       }
2008       break;
2009 
2010    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2011       if (desc->buffer_view) {
2012          surface_state = anv_bindless_state_for_binding_table(
2013             device,
2014             desc->buffer_view->general.state);
2015          assert(surface_state.alloc_size);
2016       } else {
2017          surface_state = anv_null_surface_state_for_binding_table(device);
2018       }
2019       break;
2020 
2021    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2022    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
2023       surface_state =
2024          emit_dynamic_buffer_binding_table_entry(cmd_buffer, pipe_state,
2025                                                  binding, desc);
2026       break;
2027 
2028    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2029       if (desc->buffer_view) {
2030          surface_state = anv_bindless_state_for_binding_table(
2031             device, desc->buffer_view->storage.state);
2032          assert(surface_state.alloc_size);
2033       } else {
2034          surface_state = anv_null_surface_state_for_binding_table(device);
2035       }
2036       break;
2037 
2038    default:
2039       unreachable("Invalid descriptor type");
2040    }
2041 
2042    return surface_state.offset;
2043 }
2044 
2045 static uint32_t
2046 emit_direct_descriptor_binding_table_entry(struct anv_cmd_buffer *cmd_buffer,
2047                                            struct anv_cmd_pipeline_state *pipe_state,
2048                                            const struct anv_descriptor_set *set,
2049                                            struct anv_pipeline_binding *binding,
2050                                            const struct anv_descriptor *desc)
2051 {
2052    uint32_t desc_offset;
2053 
2054    /* Relative offset in the STATE_BASE_ADDRESS::SurfaceStateBaseAddress heap.
2055     * Depending on where the descriptor surface state is allocated, it can
2056     * come either from device->internal_surface_state_pool or
2057     * device->bindless_surface_state_pool.
2058     */
2059    switch (desc->type) {
2060    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2061    case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
2062    case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
2063    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2064    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2065    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2066    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2067    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2068       desc_offset = set->desc_offset + binding->set_offset;
2069       break;
2070 
2071    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2072    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
2073       struct anv_state state =
2074          emit_dynamic_buffer_binding_table_entry(cmd_buffer, pipe_state,
2075                                                  binding, desc);
2076       desc_offset = state.offset;
2077       break;
2078    }
2079 
2080    default:
2081       unreachable("Invalid descriptor type");
2082    }
2083 
2084    return desc_offset;
2085 }
2086 
2087 static VkResult
2088 emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
2089                    struct anv_cmd_pipeline_state *pipe_state,
2090                    struct anv_shader_bin *shader,
2091                    struct anv_state *bt_state)
2092 {
2093    uint32_t state_offset;
2094 
2095    struct anv_pipeline_bind_map *map = &shader->bind_map;
2096    if (map->surface_count == 0) {
2097       *bt_state = (struct anv_state) { 0, };
2098       return VK_SUCCESS;
2099    }
2100 
2101    *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer,
2102                                                   map->surface_count,
2103                                                   &state_offset);
2104    uint32_t *bt_map = bt_state->map;
2105 
2106    if (bt_state->map == NULL)
2107       return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2108 
2109    for (uint32_t s = 0; s < map->surface_count; s++) {
2110       struct anv_pipeline_binding *binding = &map->surface_to_descriptor[s];
2111 
2112       struct anv_state surface_state;
2113 
2114       switch (binding->set) {
2115       case ANV_DESCRIPTOR_SET_NULL:
2116          bt_map[s] = 0;
2117          break;
2118 
2119       case ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS:
2120          /* Color attachment binding */
2121          assert(shader->stage == MESA_SHADER_FRAGMENT);
2122          uint32_t index = binding->index < MAX_RTS ?
2123             cmd_buffer->state.gfx.color_output_mapping[binding->index] :
2124             binding->index;
2125          if (index < cmd_buffer->state.gfx.color_att_count) {
2126             assert(index < MAX_RTS);
2127             const struct anv_attachment *att =
2128                &cmd_buffer->state.gfx.color_att[index];
2129             surface_state = att->surface_state.state;
2130          } else {
2131             surface_state = cmd_buffer->state.gfx.null_surface_state;
2132          }
2133          assert(surface_state.map);
2134          bt_map[s] = surface_state.offset + state_offset;
2135          break;
2136 
2137       case ANV_DESCRIPTOR_SET_DESCRIPTORS: {
2138          struct anv_descriptor_set *set =
2139             pipe_state->descriptors[binding->index];
2140 
2141          /* If the shader doesn't access the set buffer, just put the null
2142           * surface.
2143           */
2144          if (set->is_push && !shader->push_desc_info.used_set_buffer) {
2145             bt_map[s] = 0;
2146             break;
2147          }
2148 
2149          /* This is a descriptor set buffer so the set index is actually
2150           * given by binding->binding.  (Yes, that's confusing.)
2151           */
2152          assert(set->desc_surface_mem.alloc_size);
2153          assert(set->desc_surface_state.alloc_size);
2154          bt_map[s] = set->desc_surface_state.offset + state_offset;
2155          add_surface_reloc(cmd_buffer, anv_descriptor_set_address(set));
2156          break;
2157       }
2158 
2159       case ANV_DESCRIPTOR_SET_DESCRIPTORS_BUFFER: {
2160          assert(pipe_state->descriptor_buffers[binding->index].state.alloc_size);
2161          bt_map[s] = pipe_state->descriptor_buffers[binding->index].state.offset +
2162                      state_offset;
2163          break;
2164       }
2165 
2166       default: {
2167          assert(binding->set < MAX_SETS);
2168          const struct anv_descriptor_set *set =
2169             pipe_state->descriptors[binding->set];
2170 
2171          if (binding->index >= set->descriptor_count) {
2172             /* From the Vulkan spec section entitled "DescriptorSet and
2173              * Binding Assignment":
2174              *
2175              *    "If the array is runtime-sized, then array elements greater
2176              *    than or equal to the size of that binding in the bound
2177              *    descriptor set must not be used."
2178              *
2179              * Unfortunately, the compiler isn't smart enough to figure out
2180              * when a dynamic binding isn't used so it may grab the whole
2181              * array and stick it in the binding table.  In this case, it's
2182              * safe to just skip those bindings that are OOB.
2183              */
2184             assert(binding->index < set->layout->descriptor_count);
2185             continue;
2186          }
2187 
2188          /* For push descriptors, if the binding is fully promoted to push
2189           * constants, just reference the null surface in the binding table.
2190           * It's unused and we didn't allocate/pack a surface state for it.
2191           */
2192          if (set->is_push) {
2193             uint32_t desc_idx = set->layout->binding[binding->binding].descriptor_index;
2194             assert(desc_idx < MAX_PUSH_DESCRIPTORS);
2195 
2196             if (shader->push_desc_info.fully_promoted_ubo_descriptors & BITFIELD_BIT(desc_idx)) {
2197                surface_state =
2198                   anv_null_surface_state_for_binding_table(cmd_buffer->device);
2199                break;
2200             }
2201          }
2202 
2203          const struct anv_descriptor *desc = &set->descriptors[binding->index];
2204          if (desc->type == VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR ||
2205              desc->type == VK_DESCRIPTOR_TYPE_SAMPLER) {
2206             /* Nothing for us to do here */
2207             continue;
2208          }
2209 
2210          const struct anv_pipeline *pipeline = pipe_state->pipeline;
2211          uint32_t surface_state_offset;
2212          if (pipeline->layout.type == ANV_PIPELINE_DESCRIPTOR_SET_LAYOUT_TYPE_INDIRECT) {
2213             surface_state_offset =
2214                emit_indirect_descriptor_binding_table_entry(cmd_buffer,
2215                                                             pipe_state,
2216                                                             binding, desc);
2217          } else {
2218             assert(pipeline->layout.type == ANV_PIPELINE_DESCRIPTOR_SET_LAYOUT_TYPE_DIRECT ||
2219                    pipeline->layout.type == ANV_PIPELINE_DESCRIPTOR_SET_LAYOUT_TYPE_BUFFER);
2220             surface_state_offset =
2221                emit_direct_descriptor_binding_table_entry(cmd_buffer, pipe_state,
2222                                                           set, binding, desc);
2223          }
2224 
2225          bt_map[s] = surface_state_offset + state_offset;
2226          break;
2227       }
2228       }
2229    }
2230 
2231    return VK_SUCCESS;
2232 }
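
/* Illustrative sketch (not part of the driver): what the loop above builds.
 * A binding table is just an array of DWORD offsets into the surface-state
 * heap, one per binding-table slot; a slot holds 0 when it is unused.  The
 * names here are hypothetical.  Kept under #if 0.
 */
#if 0
static void
example_fill_binding_table(uint32_t *bt_map, uint32_t surface_count,
                           const uint32_t *surface_state_offsets,
                           uint32_t state_offset)
{
   for (uint32_t s = 0; s < surface_count; s++) {
      /* 0 means "no surface"; everything else is heap-relative. */
      bt_map[s] = surface_state_offsets[s] == 0 ?
                  0 : surface_state_offsets[s] + state_offset;
   }
}
#endif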
2233 
2234 static VkResult
2235 emit_samplers(struct anv_cmd_buffer *cmd_buffer,
2236               struct anv_cmd_pipeline_state *pipe_state,
2237               struct anv_shader_bin *shader,
2238               struct anv_state *state)
2239 {
2240    struct anv_pipeline_bind_map *map = &shader->bind_map;
2241    if (map->sampler_count == 0) {
2242       *state = (struct anv_state) { 0, };
2243       return VK_SUCCESS;
2244    }
2245 
2246    uint32_t size = map->sampler_count * 16;
2247    *state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32);
2248 
2249    if (state->map == NULL)
2250       return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2251 
2252    for (uint32_t s = 0; s < map->sampler_count; s++) {
2253       struct anv_pipeline_binding *binding = &map->sampler_to_descriptor[s];
2254       const struct anv_descriptor *desc =
2255          &pipe_state->descriptors[binding->set]->descriptors[binding->index];
2256 
2257       if (desc->type != VK_DESCRIPTOR_TYPE_SAMPLER &&
2258           desc->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
2259          continue;
2260 
2261       struct anv_sampler *sampler = desc->sampler;
2262 
2263       /* This can happen if we have an unfilled slot since TYPE_SAMPLER
2264        * happens to be zero.
2265        */
2266       if (sampler == NULL)
2267          continue;
2268 
2269       memcpy(state->map + (s * 16), sampler->state[binding->plane],
2270              sizeof(sampler->state[0]));
2271    }
2272 
2273    return VK_SUCCESS;
2274 }
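
/* Illustrative sketch (not part of the driver): the layout produced above.
 * Each sampler state occupies 16 bytes, so sampler s lands at byte offset
 * s * 16 in the dynamic-state allocation; unfilled slots are simply left
 * untouched.  Names here are hypothetical.  Kept under #if 0.
 */
#if 0
static void
example_pack_samplers(char *state_map, uint32_t sampler_count,
                      const char (*sampler_data)[16])
{
   for (uint32_t s = 0; s < sampler_count; s++)
      memcpy(state_map + s * 16, sampler_data[s], 16);  /* 16 bytes each */
}
#endif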
2275 
2276 uint32_t
2277 genX(cmd_buffer_flush_descriptor_sets)(struct anv_cmd_buffer *cmd_buffer,
2278                                        struct anv_cmd_pipeline_state *pipe_state,
2279                                        const VkShaderStageFlags dirty,
2280                                        struct anv_shader_bin **shaders,
2281                                        uint32_t num_shaders)
2282 {
2283    VkShaderStageFlags flushed = 0;
2284 
2285    VkResult result = VK_SUCCESS;
2286    for (uint32_t i = 0; i < num_shaders; i++) {
2287       if (!shaders[i])
2288          continue;
2289 
2290       gl_shader_stage stage = shaders[i]->stage;
2291       VkShaderStageFlags vk_stage = mesa_to_vk_shader_stage(stage);
2292       if ((vk_stage & dirty) == 0)
2293          continue;
2294 
2295       assert(stage < ARRAY_SIZE(cmd_buffer->state.samplers));
2296       result = emit_samplers(cmd_buffer, pipe_state, shaders[i],
2297                              &cmd_buffer->state.samplers[stage]);
2298       if (result != VK_SUCCESS)
2299          break;
2300 
2301       assert(stage < ARRAY_SIZE(cmd_buffer->state.binding_tables));
2302       result = emit_binding_table(cmd_buffer, pipe_state, shaders[i],
2303                                   &cmd_buffer->state.binding_tables[stage]);
2304       if (result != VK_SUCCESS)
2305          break;
2306 
2307       flushed |= vk_stage;
2308    }
2309 
2310    if (result != VK_SUCCESS) {
2311       assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);
2312 
2313       result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
2314       if (result != VK_SUCCESS)
2315          return 0;
2316 
2317       /* Re-emit the BT base address so we get the new surface state base
2318        * address before we start emitting binding tables etc.
2319        */
2320       genX(cmd_buffer_emit_bt_pool_base_address)(cmd_buffer);
2321 
2322       /* Re-emit all active binding tables */
2323       flushed = 0;
2324 
2325       for (uint32_t i = 0; i < num_shaders; i++) {
2326          if (!shaders[i])
2327             continue;
2328 
2329          gl_shader_stage stage = shaders[i]->stage;
2330 
2331          result = emit_samplers(cmd_buffer, pipe_state, shaders[i],
2332                                 &cmd_buffer->state.samplers[stage]);
2333          if (result != VK_SUCCESS) {
2334             anv_batch_set_error(&cmd_buffer->batch, result);
2335             return 0;
2336          }
2337          result = emit_binding_table(cmd_buffer, pipe_state, shaders[i],
2338                                      &cmd_buffer->state.binding_tables[stage]);
2339          if (result != VK_SUCCESS) {
2340             anv_batch_set_error(&cmd_buffer->batch, result);
2341             return 0;
2342          }
2343 
2344          flushed |= mesa_to_vk_shader_stage(stage);
2345       }
2346    }
2347 
2348    return flushed;
2349 }
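
/* Illustrative sketch (not part of the driver): the allocation-retry pattern
 * used above, with hypothetical callbacks.  On the first out-of-memory
 * result, a fresh binding-table block is grabbed, the binding-table pool
 * base address is re-emitted, and every stage is emitted again from scratch.
 * Kept under #if 0.
 */
#if 0
static bool
example_emit_all_with_retry(bool (*emit_all)(void *ctx),
                            bool (*grow_heap)(void *ctx),
                            void *ctx)
{
   if (emit_all(ctx))
      return true;         /* common case: everything fit the first time */

   if (!grow_heap(ctx))
      return false;        /* growing failed: report the error */

   return emit_all(ctx);   /* retry once with the new block */
}
#endif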
2350 
2351 /* This function generates the surface state used to read the content of the
2352  * descriptor buffer.
2353  */
2354 void
2355 genX(cmd_buffer_emit_push_descriptor_buffer_surface)(struct anv_cmd_buffer *cmd_buffer,
2356                                                      struct anv_descriptor_set *set)
2357 {
2358    assert(set->desc_surface_state.map == NULL);
2359 
2360    struct anv_descriptor_set_layout *layout = set->layout;
2361    enum isl_format format =
2362       anv_isl_format_for_descriptor_type(cmd_buffer->device,
2363                                          VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
2364 
2365    set->desc_surface_state =
2366       anv_cmd_buffer_alloc_surface_states(cmd_buffer, 1);
2367    if (set->desc_surface_state.map == NULL)
2368       return;
2369    anv_fill_buffer_surface_state(cmd_buffer->device,
2370                                  set->desc_surface_state.map,
2371                                  format, ISL_SWIZZLE_IDENTITY,
2372                                  ISL_SURF_USAGE_CONSTANT_BUFFER_BIT,
2373                                  set->desc_surface_addr,
2374                                  layout->descriptor_buffer_surface_size, 1);
2375 }
2376 
2377 /* This functions generates surface states used by a pipeline for push
2378  * descriptors. This is delayed to the draw/dispatch time to avoid allocation
2379  * and surface state generation when a pipeline is not going to use the
2380  * binding table to access any push descriptor data.
2381  */
2382 void
2383 genX(cmd_buffer_emit_push_descriptor_surfaces)(struct anv_cmd_buffer *cmd_buffer,
2384                                                struct anv_descriptor_set *set)
2385 {
2386    while (set->generate_surface_states) {
2387       int desc_idx = u_bit_scan(&set->generate_surface_states);
2388       struct anv_descriptor *desc = &set->descriptors[desc_idx];
2389       struct anv_buffer_view *bview = desc->set_buffer_view;
2390 
2391       if (bview != NULL && bview->general.state.map == NULL) {
2392          bview->general.state =
2393             anv_cmd_buffer_alloc_surface_states(cmd_buffer, 1);
2394          if (bview->general.state.map == NULL)
2395             return;
2396          anv_descriptor_write_surface_state(cmd_buffer->device, desc,
2397                                             bview->general.state);
2398       }
2399    }
2400 }
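
/* Illustrative sketch (not part of the driver): the bit-scan loop used
 * above, written with a compiler builtin instead of u_bit_scan().  Each
 * iteration extracts the index of the lowest set bit, i.e. one descriptor
 * that still needs a surface state, then clears that bit.  Kept under #if 0.
 */
#if 0
static void
example_for_each_set_bit(uint32_t mask, void (*fn)(int idx, void *data),
                         void *data)
{
   while (mask) {
      int idx = __builtin_ctz(mask);   /* index of the lowest set bit */
      mask &= mask - 1;                /* clear that bit */
      fn(idx, data);
   }
}
#endif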
2401 
2402 ALWAYS_INLINE void
2403 genX(batch_emit_pipe_control)(struct anv_batch *batch,
2404                               const struct intel_device_info *devinfo,
2405                               uint32_t current_pipeline,
2406                               enum anv_pipe_bits bits,
2407                               const char *reason)
2408 {
2409    genX(batch_emit_pipe_control_write)(batch,
2410                                        devinfo,
2411                                        current_pipeline,
2412                                        NoWrite,
2413                                        ANV_NULL_ADDRESS,
2414                                        0,
2415                                        bits,
2416                                        reason);
2417 }
2418 
2419 ALWAYS_INLINE void
2420 genX(batch_emit_pipe_control_write)(struct anv_batch *batch,
2421                                     const struct intel_device_info *devinfo,
2422                                     uint32_t current_pipeline,
2423                                     uint32_t post_sync_op,
2424                                     struct anv_address address,
2425                                     uint32_t imm_data,
2426                                     enum anv_pipe_bits bits,
2427                                     const char *reason)
2428 {
2429    if ((batch->engine_class == INTEL_ENGINE_CLASS_COPY) ||
2430        (batch->engine_class == INTEL_ENGINE_CLASS_VIDEO))
2431       unreachable("Trying to emit unsupported PIPE_CONTROL command.");
2432 
2433    const bool trace_flush =
2434       (bits & (ANV_PIPE_FLUSH_BITS |
2435                ANV_PIPE_STALL_BITS |
2436                ANV_PIPE_INVALIDATE_BITS |
2437                ANV_PIPE_END_OF_PIPE_SYNC_BIT)) != 0;
2438    if (trace_flush && batch->trace != NULL) {
2439       /* Store pipe control reasons if there is enough space. */
2440       if (batch->pc_reasons_count < ARRAY_SIZE(batch->pc_reasons)) {
2441          batch->pc_reasons[batch->pc_reasons_count++] = reason;
2442       }
2443       trace_intel_begin_stall(batch->trace);
2444    }
2445 
2446 
2447    /* XXX - insert all workarounds and GFX specific things below. */
2448 
2449    /* Wa_14014966230: For COMPUTE Workload - Any PIPE_CONTROL command with
2450     * POST_SYNC Operation Enabled MUST be preceded by a PIPE_CONTROL
2451     * with CS_STALL Bit set (with No POST_SYNC ENABLED)
2452     */
2453    if (intel_device_info_is_adln(devinfo) &&
2454        current_pipeline == GPGPU &&
2455        post_sync_op != NoWrite) {
2456       anv_batch_emit(batch, GENX(PIPE_CONTROL), pipe) {
2457          pipe.CommandStreamerStallEnable = true;
2458          anv_debug_dump_pc(pipe, "Wa_14014966230");
2459       };
2460    }
2461 
2462    /* SKL PRMs, Volume 7: 3D-Media-GPGPU, Programming Restrictions for
2463     * PIPE_CONTROL, Flush Types:
2464     *   "Requires stall bit ([20] of DW) set for all GPGPU Workloads."
2465     * For newer platforms this is documented in the PIPE_CONTROL instruction
2466     * page.
2467     */
2468    if (current_pipeline == GPGPU &&
2469        (bits & ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT))
2470       bits |= ANV_PIPE_CS_STALL_BIT;
2471 
2472 #if INTEL_NEEDS_WA_1409600907
2473    /* Wa_1409600907: "PIPE_CONTROL with Depth Stall Enable bit must
2474     * be set with any PIPE_CONTROL with Depth Flush Enable bit set."
2475     */
2476    if (bits & ANV_PIPE_DEPTH_CACHE_FLUSH_BIT)
2477       bits |= ANV_PIPE_DEPTH_STALL_BIT;
2478 #endif
2479 
2480 #if GFX_VERx10 >= 125
2481    if (current_pipeline != GPGPU) {
2482       if (bits & ANV_PIPE_HDC_PIPELINE_FLUSH_BIT)
2483          bits |= ANV_PIPE_UNTYPED_DATAPORT_CACHE_FLUSH_BIT;
2484    } else {
2485       if (bits & (ANV_PIPE_HDC_PIPELINE_FLUSH_BIT |
2486                   ANV_PIPE_DATA_CACHE_FLUSH_BIT))
2487          bits |= ANV_PIPE_UNTYPED_DATAPORT_CACHE_FLUSH_BIT;
2488    }
2489 
2490    /* BSpec 47112: PIPE_CONTROL::Untyped Data-Port Cache Flush:
2491     *
2492     *    "'HDC Pipeline Flush' bit must be set for this bit to take
2493     *     effect."
2494     */
2495    if (bits & ANV_PIPE_UNTYPED_DATAPORT_CACHE_FLUSH_BIT)
2496       bits |= ANV_PIPE_HDC_PIPELINE_FLUSH_BIT;
2497 #endif
2498 
2499 #if GFX_VER < 12
2500    if (bits & ANV_PIPE_HDC_PIPELINE_FLUSH_BIT)
2501       bits |= ANV_PIPE_DATA_CACHE_FLUSH_BIT;
2502 #endif
2503 
2504    /* From the SKL PRM, Vol. 2a, "PIPE_CONTROL",
2505     *
2506     *    "If the VF Cache Invalidation Enable is set to a 1 in a
2507     *    PIPE_CONTROL, a separate Null PIPE_CONTROL, all bitfields sets to
2508     *    0, with the VF Cache Invalidation Enable set to 0 needs to be sent
2509     *    prior to the PIPE_CONTROL with VF Cache Invalidation Enable set to
2510     *    a 1."
2511     *
2512     * This appears to hang Broadwell, so we restrict it to just gfx9.
2513     */
2514    if (GFX_VER == 9 && (bits & ANV_PIPE_VF_CACHE_INVALIDATE_BIT))
2515       anv_batch_emit(batch, GENX(PIPE_CONTROL), pipe);
2516 
2517    anv_batch_emit(batch, GENX(PIPE_CONTROL), pipe) {
2518 #if GFX_VERx10 >= 125
2519       pipe.UntypedDataPortCacheFlushEnable =
2520          bits & ANV_PIPE_UNTYPED_DATAPORT_CACHE_FLUSH_BIT;
2521       pipe.CCSFlushEnable = bits & ANV_PIPE_CCS_CACHE_FLUSH_BIT;
2522 #endif
2523 #if GFX_VER == 12
2524       pipe.TileCacheFlushEnable = bits & ANV_PIPE_TILE_CACHE_FLUSH_BIT;
2525       pipe.L3FabricFlush = bits & ANV_PIPE_L3_FABRIC_FLUSH_BIT;
2526 #endif
2527 #if GFX_VER > 11
2528       pipe.HDCPipelineFlushEnable = bits & ANV_PIPE_HDC_PIPELINE_FLUSH_BIT;
2529 #endif
2530       pipe.DepthCacheFlushEnable = bits & ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
2531       pipe.DCFlushEnable = bits & ANV_PIPE_DATA_CACHE_FLUSH_BIT;
2532       pipe.RenderTargetCacheFlushEnable =
2533          bits & ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
2534 
2535       pipe.DepthStallEnable = bits & ANV_PIPE_DEPTH_STALL_BIT;
2536 
2537       pipe.TLBInvalidate = bits & ANV_PIPE_TLB_INVALIDATE_BIT;
2538 
2539 #if GFX_VERx10 >= 125
2540       pipe.PSSStallSyncEnable = bits & ANV_PIPE_PSS_STALL_SYNC_BIT;
2541 #endif
2542       pipe.CommandStreamerStallEnable = bits & ANV_PIPE_CS_STALL_BIT;
2543       pipe.StallAtPixelScoreboard = bits & ANV_PIPE_STALL_AT_SCOREBOARD_BIT;
2544 
2545       pipe.StateCacheInvalidationEnable =
2546          bits & ANV_PIPE_STATE_CACHE_INVALIDATE_BIT;
2547       pipe.ConstantCacheInvalidationEnable =
2548          bits & ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
2549 #if GFX_VER >= 12
2550       /* Invalidates the L3 cache part in which index & vertex data is loaded
2551        * when VERTEX_BUFFER_STATE::L3BypassDisable is set.
2552        */
2553       pipe.L3ReadOnlyCacheInvalidationEnable =
2554          bits & ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
2555 #endif
2556       pipe.VFCacheInvalidationEnable =
2557          bits & ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
2558       pipe.TextureCacheInvalidationEnable =
2559          bits & ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
2560       pipe.InstructionCacheInvalidateEnable =
2561          bits & ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT;
2562 
2563       pipe.PostSyncOperation = post_sync_op;
2564       pipe.Address = address;
2565       pipe.DestinationAddressType = DAT_PPGTT;
2566       pipe.ImmediateData = imm_data;
2567 
2568       anv_debug_dump_pc(pipe, reason);
2569    }
2570 
2571    if (trace_flush && batch->trace != NULL) {
2572       trace_intel_end_stall(batch->trace, bits,
2573                             anv_pipe_flush_bit_to_ds_stall_flag,
2574                             batch->pc_reasons[0],
2575                             batch->pc_reasons[1],
2576                             batch->pc_reasons[2],
2577                             batch->pc_reasons[3]);
2578       batch->pc_reasons[0] = NULL;
2579       batch->pc_reasons[1] = NULL;
2580       batch->pc_reasons[2] = NULL;
2581       batch->pc_reasons[3] = NULL;
2582       batch->pc_reasons_count = 0;
2583    }
2584 }
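/* Illustrative use (a sketch, not code from this driver): a fence-style
 * "write 1 when prior work is done" could be requested with something like
 * the call below, assuming `addr` is a valid struct anv_address and
 * WriteImmediateData is the genxml post-sync operation used elsewhere in
 * anv:
 *
 *    genX(batch_emit_pipe_control_write)(&cmd_buffer->batch,
 *                                        cmd_buffer->device->info,
 *                                        cmd_buffer->state.current_pipeline,
 *                                        WriteImmediateData, addr, 1,
 *                                        ANV_PIPE_CS_STALL_BIT |
 *                                        ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT,
 *                                        "example: availability write");
 */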
2585 
2586 /* Set preemption on/off. */
2587 void
2588 genX(batch_set_preemption)(struct anv_batch *batch,
2589                            struct anv_device *device,
2590                            uint32_t current_pipeline,
2591                            bool value)
2592 {
2593 #if INTEL_WA_16013994831_GFX_VER
2594    if (!intel_needs_workaround(device->info, 16013994831))
2595       return;
2596 
2597    anv_batch_write_reg(batch, GENX(CS_CHICKEN1), cc1) {
2598       cc1.DisablePreemptionandHighPriorityPausingdueto3DPRIMITIVECommand = !value;
2599       cc1.DisablePreemptionandHighPriorityPausingdueto3DPRIMITIVECommandMask = true;
2600    }
2601 
2602    /* Wa_16013994831 - we need to insert CS_STALL and 250 noops. */
2603    genx_batch_emit_pipe_control(batch, device->info, current_pipeline,
2604                                 ANV_PIPE_CS_STALL_BIT);
2605 
2606    for (unsigned i = 0; i < 250; i++)
2607       anv_batch_emit(batch, GENX(MI_NOOP), noop);
2608 #endif
2609 }
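/* On platforms that do not need Wa_16013994831 the helper above compiles to
 * an empty function (or returns early at runtime), so callers can invoke it
 * unconditionally.
 */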
2610 
2611 void
2612 genX(cmd_buffer_set_preemption)(struct anv_cmd_buffer *cmd_buffer, bool value)
2613 {
2614 #if GFX_VERx10 >= 120
2615    if (cmd_buffer->state.gfx.object_preemption == value)
2616       return;
2617 
2618    genX(batch_set_preemption)(&cmd_buffer->batch, cmd_buffer->device,
2619                               cmd_buffer->state.current_pipeline,
2620                               value);
2621    cmd_buffer->state.gfx.object_preemption = value;
2622 #endif
2623 }
2624 
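/* Fill (or re-fill) the surface state used to access a bound descriptor
 * buffer set. The surface state is only rewritten when the effective
 * descriptor set address changes; otherwise the previously filled state is
 * reused.
 */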
2625 ALWAYS_INLINE static void
2626 update_descriptor_set_surface_state(struct anv_cmd_buffer *cmd_buffer,
2627                                     struct anv_cmd_pipeline_state *pipe_state,
2628                                     uint32_t set_idx)
2629 {
2630    if (!pipe_state->descriptor_buffers[set_idx].bound)
2631       return;
2632 
2633    const struct anv_physical_device *device = cmd_buffer->device->physical;
2634    const int32_t buffer_index =
2635       pipe_state->descriptor_buffers[set_idx].buffer_index;
2636    const struct anv_va_range *push_va_range =
2637       GFX_VERx10 >= 125 ?
2638       &device->va.push_descriptor_buffer_pool :
2639       &device->va.internal_surface_state_pool;
2640    const struct anv_va_range *va_range =
2641       buffer_index == -1 ? push_va_range : &device->va.dynamic_visible_pool;
2642    const uint64_t descriptor_set_addr =
2643       (buffer_index == -1 ? va_range->addr :
2644        cmd_buffer->state.descriptor_buffers.address[buffer_index]) +
2645       pipe_state->descriptor_buffers[set_idx].buffer_offset;
2646    const uint64_t set_size =
2647       MIN2(va_range->size - (descriptor_set_addr - va_range->addr),
2648            anv_physical_device_bindless_heap_size(device, true));
2649 
2650    if (descriptor_set_addr != pipe_state->descriptor_buffers[set_idx].address) {
2651       pipe_state->descriptor_buffers[set_idx].address = descriptor_set_addr;
2652 
2653       struct anv_state surface_state =
2654          anv_cmd_buffer_alloc_surface_states(cmd_buffer, 1);
2655       const enum isl_format format =
2656          anv_isl_format_for_descriptor_type(cmd_buffer->device,
2657                                             VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
2658       anv_fill_buffer_surface_state(
2659          cmd_buffer->device, surface_state.map,
2660          format, ISL_SWIZZLE_IDENTITY,
2661          ISL_SURF_USAGE_CONSTANT_BUFFER_BIT,
2662          anv_address_from_u64(pipe_state->descriptor_buffers[set_idx].address),
2663          set_size, 1);
2664 
2665       pipe_state->descriptor_buffers[set_idx].state = surface_state;
2666    }
2667 }
2668 
2669 ALWAYS_INLINE static uint32_t
2670 compute_descriptor_set_surface_offset(const struct anv_cmd_buffer *cmd_buffer,
2671                                       const struct anv_cmd_pipeline_state *pipe_state,
2672                                       const uint32_t set_idx)
2673 {
2674    const struct anv_physical_device *device = cmd_buffer->device->physical;
2675 
2676    if (device->uses_ex_bso) {
2677       int32_t buffer_index =
2678          pipe_state->descriptor_buffers[set_idx].buffer_index;
2679       uint64_t buffer_address =
2680          buffer_index == -1 ?
2681          device->va.push_descriptor_buffer_pool.addr :
2682          cmd_buffer->state.descriptor_buffers.address[buffer_index];
2683 
2684       return (buffer_address - device->va.dynamic_visible_pool.addr) +
2685               pipe_state->descriptor_buffers[set_idx].buffer_offset;
2686    }
2687 
2688    return pipe_state->descriptor_buffers[set_idx].buffer_offset << 6;
2689 }
2690 
2691 ALWAYS_INLINE static uint32_t
2692 compute_descriptor_set_sampler_offset(const struct anv_cmd_buffer *cmd_buffer,
2693                                       const struct anv_cmd_pipeline_state *pipe_state,
2694                                       const uint32_t set_idx)
2695 {
2696    const struct anv_physical_device *device = cmd_buffer->device->physical;
2697    int32_t buffer_index =
2698       pipe_state->descriptor_buffers[set_idx].buffer_index;
2699    uint64_t buffer_address =
2700       buffer_index == -1 ?
2701       device->va.push_descriptor_buffer_pool.addr :
2702       cmd_buffer->state.descriptor_buffers.address[buffer_index];
2703 
2704    return (buffer_address - device->va.dynamic_state_pool.addr) +
2705       pipe_state->descriptor_buffers[set_idx].buffer_offset;
2706 }
2707 
2708 void
2709 genX(flush_descriptor_buffers)(struct anv_cmd_buffer *cmd_buffer,
2710                                struct anv_cmd_pipeline_state *pipe_state)
2711 {
2712    /* On Gfx12.5+ the STATE_BASE_ADDRESS BindlessSurfaceStateBaseAddress &
2713     * DynamicStateBaseAddress are fixed. So as long as we stay in one
2714     * descriptor buffer mode, there is no need to switch.
2715     */
2716 #if GFX_VERx10 >= 125
2717    if (cmd_buffer->state.current_db_mode !=
2718        cmd_buffer->state.pending_db_mode)
2719       genX(cmd_buffer_emit_state_base_address)(cmd_buffer);
2720 #else
2721    if (cmd_buffer->state.descriptor_buffers.dirty)
2722       genX(cmd_buffer_emit_state_base_address)(cmd_buffer);
2723 #endif
2724 
2725    assert(cmd_buffer->state.current_db_mode !=
2726           ANV_CMD_DESCRIPTOR_BUFFER_MODE_UNKNOWN);
2727    if (cmd_buffer->state.current_db_mode == ANV_CMD_DESCRIPTOR_BUFFER_MODE_BUFFER &&
2728        (cmd_buffer->state.descriptor_buffers.dirty ||
2729         (pipe_state->pipeline->active_stages &
2730          cmd_buffer->state.descriptor_buffers.offsets_dirty) != 0)) {
2731       struct anv_push_constants *push_constants =
2732          &pipe_state->push_constants;
2733       for (uint32_t i = 0; i < ARRAY_SIZE(push_constants->desc_surface_offsets); i++) {
2734          update_descriptor_set_surface_state(cmd_buffer, pipe_state, i);
2735 
2736          push_constants->desc_surface_offsets[i] =
2737             compute_descriptor_set_surface_offset(cmd_buffer, pipe_state, i);
2738          push_constants->desc_sampler_offsets[i] =
2739             compute_descriptor_set_sampler_offset(cmd_buffer, pipe_state, i);
2740       }
2741 
2742 #if GFX_VERx10 < 125
2743       struct anv_device *device = cmd_buffer->device;
2744       push_constants->surfaces_base_offset =
2745          (cmd_buffer->state.descriptor_buffers.surfaces_address -
2746           device->physical->va.dynamic_visible_pool.addr);
2747 #endif
2748 
2749       cmd_buffer->state.push_constants_dirty |=
2750          (cmd_buffer->state.descriptor_buffers.offsets_dirty &
2751           pipe_state->pipeline->active_stages);
2752       pipe_state->push_constants_data_dirty = true;
2753       cmd_buffer->state.descriptor_buffers.offsets_dirty &=
2754          ~pipe_state->pipeline->active_stages;
2755    }
2756 
2757    cmd_buffer->state.descriptor_buffers.dirty = false;
2758 }
2759 
2760 void
2761 genX(cmd_buffer_begin_companion)(struct anv_cmd_buffer *cmd_buffer,
2762                                  VkCommandBufferLevel level)
2763 {
2764    cmd_buffer->vk.level = level;
2765    cmd_buffer->is_companion_rcs_cmd_buffer = true;
2766 
2767    trace_intel_begin_cmd_buffer(&cmd_buffer->trace);
2768 
2769 #if GFX_VER >= 12
2770    /* Reenable prefetching at the beginning of secondary command buffers. We
2771     * do this so that the return instruction, which is only patched in when
2772     * the secondary is executed, is not prefetched before that edit completes.
2773     */
2774    if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
2775       anv_batch_emit(&cmd_buffer->batch, GENX(MI_ARB_CHECK), arb) {
2776          arb.PreParserDisableMask = true;
2777          arb.PreParserDisable = false;
2778       }
2779    }
2780 #endif
2781 
2782    /* A companion command buffer is only used for blorp commands at the
2783     * moment, so default to the legacy mode.
2784     */
2785    cmd_buffer->state.current_db_mode = ANV_CMD_DESCRIPTOR_BUFFER_MODE_LEGACY;
2786    genX(cmd_buffer_emit_bt_pool_base_address)(cmd_buffer);
2787 
2788    /* Invalidate the aux table in every primary command buffer. This ensures
2789     * the command buffer sees the last updates made by the host.
2790     */
2791    if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY &&
2792        cmd_buffer->device->info->has_aux_map) {
2793       anv_add_pending_pipe_bits(cmd_buffer,
2794                                 ANV_PIPE_AUX_TABLE_INVALIDATE_BIT,
2795                                 "new cmd buffer with aux-tt");
2796    }
2797 }
2798 
2799 static bool
2800 aux_op_resolves(enum isl_aux_op aux_op)
2801 {
2802    return aux_op == ISL_AUX_OP_FULL_RESOLVE ||
2803           aux_op == ISL_AUX_OP_PARTIAL_RESOLVE;
2804 }
2805 
2806 static bool
2807 aux_op_clears(enum isl_aux_op aux_op)
2808 {
2809    return aux_op == ISL_AUX_OP_FAST_CLEAR ||
2810           aux_op == ISL_AUX_OP_AMBIGUATE;
2811 }
2812 
2813 static bool
2814 aux_op_renders(enum isl_aux_op aux_op)
2815 {
2816    return aux_op == ISL_AUX_OP_NONE;
2817 }
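/* Taken together, these three helpers partition the isl_aux_op values used
 * here into the {Clear, Render, Resolve} classes referred to by the PRM text
 * quoted below: FAST_CLEAR and AMBIGUATE count as clears, FULL/PARTIAL
 * resolves as resolves, and NONE (regular rendering) as renders.
 */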
2818 
2819 static void
2820 add_pending_pipe_bits_for_color_aux_op(struct anv_cmd_buffer *cmd_buffer,
2821                                        enum isl_aux_op next_aux_op,
2822                                        enum anv_pipe_bits pipe_bits)
2823 {
2824    const enum isl_aux_op last_aux_op = cmd_buffer->state.color_aux_op;
2825    assert(next_aux_op != last_aux_op);
2826 
2827    char flush_reason[64] = {};
2828    if (INTEL_DEBUG(DEBUG_PIPE_CONTROL) ||
2829        u_trace_enabled(&cmd_buffer->device->ds.trace_context)) {
2830       int ret = snprintf(flush_reason, sizeof(flush_reason),
2831                          "color aux-op: %s -> %s",
2832                          isl_aux_op_to_name(last_aux_op),
2833                          isl_aux_op_to_name(next_aux_op));
2834       assert(ret < sizeof(flush_reason));
2835    }
2836 
2837    anv_add_pending_pipe_bits(cmd_buffer, pipe_bits, flush_reason);
2838 }
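/* Note: the flush_reason string above is only formatted when pipe-control
 * debug output or u_trace collection is enabled; on the common path an empty
 * string is passed and the snprintf() cost is avoided.
 */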
2839 
2840 void
2841 genX(cmd_buffer_update_color_aux_op)(struct anv_cmd_buffer *cmd_buffer,
2842                                      enum isl_aux_op next_aux_op)
2843 {
2844    const enum isl_aux_op last_aux_op = cmd_buffer->state.color_aux_op;
2845 
2846    if (!aux_op_clears(last_aux_op) && aux_op_clears(next_aux_op)) {
2847 #if GFX_VER >= 20
2848       /* From the Xe2 Bspec 57340 (r59562),
2849        * "MCS/CCS Buffers, Fast Clear for Render Target(s)":
2850        *
2851        *    Synchronization:
2852        *    Due to interaction of scaled clearing rectangle with pixel
2853        *    scoreboard, we require one of the following commands to be
2854        *    issued. [...]
2855        *
2856        *    PIPE_CONTROL
2857        *    PSS Stall Sync Enable            [...] 1b (Enable)
2858        *       Machine-wide Stall at Pixel Stage, wait for all Prior Pixel
2859        *       Work to Reach End of Pipe
2860        *    Render Target Cache Flush Enable [...] 1b (Enable)
2861        *       Post-Sync Op Flushes Render Cache before Unblocking Stall
2862        *
2863        *    This synchronization step is required before and after the fast
2864        *    clear pass, to ensure correct ordering between pixels.
2865        */
2866       add_pending_pipe_bits_for_color_aux_op(
2867             cmd_buffer, next_aux_op,
2868             ANV_PIPE_PSS_STALL_SYNC_BIT |
2869             ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT);
2870 
2871 #elif GFX_VERx10 == 125
2872       /* From the ACM Bspec 47704 (r52663), "Render Target Fast Clear":
2873        *
2874        *    Preamble pre fast clear synchronization
2875        *
2876        *    PIPE_CONTROL:
2877        *    PS sync stall = 1
2878        *    Tile Cache Flush = 1
2879        *    RT Write Flush = 1
2880        *    HDC Flush = 1
2881        *    DC Flush = 1
2882        *    Texture Invalidate = 1
2883        *
2884        *    [...]
2885        *
2886        *    Objective of the preamble flushes is to ensure all data is
2887        *    evicted from L1 caches prior to fast clear.
2888        */
2889       add_pending_pipe_bits_for_color_aux_op(
2890             cmd_buffer, next_aux_op,
2891             ANV_PIPE_PSS_STALL_SYNC_BIT |
2892             ANV_PIPE_TILE_CACHE_FLUSH_BIT |
2893             ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
2894             ANV_PIPE_HDC_PIPELINE_FLUSH_BIT |
2895             ANV_PIPE_DATA_CACHE_FLUSH_BIT |
2896             ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT);
2897 
2898 #elif GFX_VERx10 == 120
2899       /* From the TGL Bspec 47704 (r52663), "Render Target Fast Clear":
2900        *
2901        *    Preamble pre fast clear synchronization
2902        *
2903        *    PIPE_CONTROL:
2904        *    Depth Stall = 1
2905        *    Tile Cache Flush = 1
2906        *    RT Write Flush = 1
2907        *    Texture Invalidate = 1
2908        *
2909        *    [...]
2910        *
2911        *    Objective of the preamble flushes is to ensure all data is
2912        *    evicted from L1 caches prior to fast clear.
2913        */
2914       add_pending_pipe_bits_for_color_aux_op(
2915             cmd_buffer, next_aux_op,
2916             ANV_PIPE_DEPTH_STALL_BIT  |
2917             ANV_PIPE_TILE_CACHE_FLUSH_BIT |
2918             ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
2919             ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT);
2920 
2921 #else
2922       /* From the Sky Lake PRM Vol. 7, "MCS Buffer for Render Target(s)":
2923        *
2924        *    Any transition from any value in {Clear, Render, Resolve} to a
2925        *    different value in {Clear, Render, Resolve} requires end of pipe
2926        *    synchronization.
2927        *
2928        * From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
2929        *
2930        *    After Render target fast clear, pipe-control with color cache
2931        *    write-flush must be issued before sending any DRAW commands on
2932        *    that render target.
2933        *
2934        * The last comment is a bit cryptic and doesn't really tell you what's
2935        * going on or what's really needed.  It appears that fast clear ops are
2936        * not properly synchronized with other drawing.  This means that we
2937        * cannot have a fast clear operation in the pipe at the same time as
2938        * other regular drawing operations.  We need to use a PIPE_CONTROL
2939        * to ensure that the contents of the previous draw hit the render
2940        * target before we resolve and then use a second PIPE_CONTROL after
2941        * the resolve to ensure that it is completed before any additional
2942        * drawing occurs.
2943        */
2944       add_pending_pipe_bits_for_color_aux_op(
2945             cmd_buffer, next_aux_op,
2946             ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
2947             ANV_PIPE_END_OF_PIPE_SYNC_BIT);
2948 #endif
2949 
2950    } else if (aux_op_clears(last_aux_op) && !aux_op_clears(next_aux_op)) {
2951 #if GFX_VERx10 >= 125
2952       /* From the ACM PRM Vol. 9, "Color Fast Clear Synchronization":
2953        *
2954        *    Postamble post fast clear synchronization
2955        *
2956        *    PIPE_CONTROL:
2957        *    PS sync stall = 1
2958        *    RT flush = 1
2959        */
2960       add_pending_pipe_bits_for_color_aux_op(
2961             cmd_buffer, next_aux_op,
2962             ANV_PIPE_PSS_STALL_SYNC_BIT |
2963             ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT);
2964 
2965 #elif GFX_VERx10 == 120
2966       /* From the TGL PRM Vol. 9, "Color Fast Clear Synchronization":
2967        *
2968        *    Postamble post fast clear synchronization
2969        *
2970        *    PIPE_CONTROL:
2971        *    Depth Stall = 1
2972        *    Tile Cache Flush = 1
2973        *    RT Write Flush = 1
2974        *
2975        * From the TGL PRM Vol. 2a, "PIPE_CONTROL::L3 Fabric Flush":
2976        *
2977        *    For a sequence of color fast clears. A single PIPE_CONTROL
2978        *    command with Render Target Cache Flush, L3 Fabric Flush and Depth
2979        *    Stall set at the end of the sequence suffices.
2980        *
2981        * Replace the Tile Cache flush with an L3 fabric flush.
2982        */
2983       add_pending_pipe_bits_for_color_aux_op(
2984             cmd_buffer, next_aux_op,
2985             ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
2986             ANV_PIPE_L3_FABRIC_FLUSH_BIT |
2987             ANV_PIPE_DEPTH_STALL_BIT);
2988 
2989 #else
2990       /* From the Sky Lake PRM Vol. 7, "Render Target Fast Clear":
2991        *
2992        *    After Render target fast clear, pipe-control with color cache
2993        *    write-flush must be issued before sending any DRAW commands on
2994        *    that render target.
2995        *
2996        * From the Sky Lake PRM Vol. 7, "MCS Buffer for Render Target(s)":
2997        *
2998        *    Any transition from any value in {Clear, Render, Resolve} to a
2999        *    different value in {Clear, Render, Resolve} requires end of pipe
3000        *    synchronization.
3001        */
3002       add_pending_pipe_bits_for_color_aux_op(
3003             cmd_buffer, next_aux_op,
3004             ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
3005             ANV_PIPE_END_OF_PIPE_SYNC_BIT);
3006 #endif
3007 
3008    } else if (aux_op_renders(last_aux_op) != aux_op_renders(next_aux_op)) {
3009       assert(aux_op_resolves(last_aux_op) != aux_op_resolves(next_aux_op));
3010       /* From the Sky Lake PRM Vol. 7, "MCS Buffer for Render Target(s)":
3011        *
3012        *    Any transition from any value in {Clear, Render, Resolve} to a
3013        *    different value in {Clear, Render, Resolve} requires end of pipe
3014        *    synchronization.
3015        *
3016        * We perform a flush of the write cache before and after the clear and
3017        * resolve operations to meet this requirement.
3018        *
3019        * Unlike other drawing, fast clear operations are not properly
3020        * synchronized. The first PIPE_CONTROL here likely ensures that the
3021        * contents of the previous render or clear hit the render target before
3022        * we resolve and the second likely ensures that the resolve is complete
3023        * before we do any more rendering or clearing.
3024        */
3025       add_pending_pipe_bits_for_color_aux_op(
3026             cmd_buffer, next_aux_op,
3027             ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
3028             ANV_PIPE_END_OF_PIPE_SYNC_BIT);
3029    }
3030 
3031    if (last_aux_op != ISL_AUX_OP_FAST_CLEAR &&
3032        next_aux_op == ISL_AUX_OP_FAST_CLEAR &&
3033        cmd_buffer->device->isl_dev.ss.clear_color_state_size > 0) {
3034       /* From the ICL PRM Vol. 9, "State Caching":
3035        *
3036        *    Any values referenced by pointers within the RENDER_SURFACE_STATE
3037        *    [...] (e.g. Clear Color Pointer, [...]) are considered to be part
3038        *    of that state and any changes to these referenced values requires
3039        *    an invalidation of the L1 state cache to ensure the new values are
3040        *    being used as part of the state. [...]
3041        *
3042        * We could alternatively perform this invalidation when we stop
3043        * fast-clearing. A benefit to doing it now, when transitioning to a
3044        * fast clear, is that we save a pipe control by combining the state
3045        * cache invalidation with the texture cache invalidation done on gfx12.
3046        */
3047       anv_add_pending_pipe_bits(cmd_buffer,
3048                                 ANV_PIPE_STATE_CACHE_INVALIDATE_BIT,
3049                                 "Invalidate for new clear color");
3050    }
3051 
3052    /* Update the auxiliary surface operation, but with one exception. */
3053    if (last_aux_op == ISL_AUX_OP_FAST_CLEAR &&
3054        next_aux_op == ISL_AUX_OP_AMBIGUATE) {
3055       assert(aux_op_clears(last_aux_op) && aux_op_clears(next_aux_op));
3056       /* Fast clears and ambiguates are in the same class of operation, but
3057        * fast clears have more stringent synchronization requirements. For
3058        * better performance, don't replace the current fast clear operation
3059        * state with ambiguate. This allows us to perform one state cache
3060        * invalidation when leaving a sequence which alternates between
3061        * ambiguates and clears, instead of multiple such invalidations.
3062        */
3063    } else {
3064       cmd_buffer->state.color_aux_op = next_aux_op;
3065    }
3066 
3067    if (next_aux_op == ISL_AUX_OP_FAST_CLEAR) {
3068       if (aux_op_clears(last_aux_op)) {
3069          cmd_buffer->num_dependent_clears++;
3070       } else {
3071          cmd_buffer->num_independent_clears++;
3072       }
3073    }
3074 }
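/* Illustrative sequence (a sketch of how the transitions above play out): a
 * draw (Render), then a fast clear (Clear), then another draw (Render)
 * triggers the pre-clear preamble flushes on the Render -> Clear transition
 * and the post-clear postamble flushes on the Clear -> Render transition,
 * with the state-cache invalidation added whenever we newly enter
 * ISL_AUX_OP_FAST_CLEAR and clear colors are stored in surface state.
 */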
3075 
3076 static void
3077 genX(cmd_buffer_set_protected_memory)(struct anv_cmd_buffer *cmd_buffer,
3078                                       bool enabled)
3079 {
3080 #if GFX_VER >= 12
3081    if (enabled) {
3082       anv_batch_emit(&cmd_buffer->batch, GENX(MI_SET_APPID), appid) {
3083          /* Default value for single session. */
3084          appid.ProtectedMemoryApplicationID = cmd_buffer->device->protected_session_id;
3085          appid.ProtectedMemoryApplicationIDType = DISPLAY_APP;
3086       }
3087    }
3088    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
3089       pc.PipeControlFlushEnable = true;
3090       pc.DCFlushEnable = true;
3091       pc.RenderTargetCacheFlushEnable = true;
3092       pc.CommandStreamerStallEnable = true;
3093       if (enabled)
3094          pc.ProtectedMemoryEnable = true;
3095       else
3096          pc.ProtectedMemoryDisable = true;
3097    }
3098 #else
3099    unreachable("Protected content not supported");
3100 #endif
3101 }
3102 
3103 VkResult
3104 genX(BeginCommandBuffer)(
3105     VkCommandBuffer                             commandBuffer,
3106     const VkCommandBufferBeginInfo*             pBeginInfo)
3107 {
3108    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
3109    VkResult result;
3110 
3111    /* If this is the first vkBeginCommandBuffer, we must *initialize* the
3112     * command buffer's state. Otherwise, we must *reset* its state. In both
3113     * cases we reset it.
3114     *
3115     * From the Vulkan 1.0 spec:
3116     *
3117     *    If a command buffer is in the executable state and the command buffer
3118     *    was allocated from a command pool with the
3119     *    VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT flag set, then
3120     *    vkBeginCommandBuffer implicitly resets the command buffer, behaving
3121     *    as if vkResetCommandBuffer had been called with
3122     *    VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT not set. It then puts
3123     *    the command buffer in the recording state.
3124     */
3125    anv_cmd_buffer_reset(&cmd_buffer->vk, 0);
3126    anv_cmd_buffer_reset_rendering(cmd_buffer);
3127 
3128    cmd_buffer->usage_flags = pBeginInfo->flags;
3129 
3130    /* VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT must be ignored for
3131     * primary level command buffers.
3132     *
3133     * From the Vulkan 1.0 spec:
3134     *
3135     *    VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT specifies that a
3136     *    secondary command buffer is considered to be entirely inside a render
3137     *    pass. If this is a primary command buffer, then this bit is ignored.
3138     */
3139    if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY)
3140       cmd_buffer->usage_flags &= ~VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
3141 
3142 #if GFX_VER >= 12
3143    /* Reenable prefetching at the beginning of secondary command buffers. We
3144     * do this so that the return instruction, which is only patched in when
3145     * the secondary is executed, is not prefetched before that edit completes.
3146     */
3147    if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
3148       anv_batch_emit(&cmd_buffer->batch, GENX(MI_ARB_CHECK), arb) {
3149          arb.PreParserDisableMask = true;
3150          arb.PreParserDisable = false;
3151       }
3152    }
3153 #endif
3154 
3155    /* Assume the viewport has already been set in primary command buffers. */
3156    if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
3157       cmd_buffer->state.gfx.viewport_set = true;
3158 
3159    trace_intel_begin_cmd_buffer(&cmd_buffer->trace);
3160 
3161    if (anv_cmd_buffer_is_video_queue(cmd_buffer) ||
3162        anv_cmd_buffer_is_blitter_queue(cmd_buffer)) {
3163       /* Invalidate the aux table in every primary command buffer. This
3164        * ensures the command buffer sees the last updates made by the host.
3165        */
3166       if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY &&
3167           cmd_buffer->device->info->has_aux_map) {
3168          anv_add_pending_pipe_bits(cmd_buffer,
3169                                    ANV_PIPE_AUX_TABLE_INVALIDATE_BIT,
3170                                    "new cmd buffer with aux-tt");
3171       }
3172       return VK_SUCCESS;
3173    }
3174 
3175 #if GFX_VER >= 12
3176    if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY &&
3177        cmd_buffer->vk.pool->flags & VK_COMMAND_POOL_CREATE_PROTECTED_BIT)
3178       genX(cmd_buffer_set_protected_memory)(cmd_buffer, true);
3179 #endif
3180 
3181    if (cmd_buffer->device->vk.enabled_extensions.EXT_descriptor_buffer) {
3182       genX(cmd_buffer_emit_state_base_address)(cmd_buffer);
3183    } else {
3184       cmd_buffer->state.current_db_mode = ANV_CMD_DESCRIPTOR_BUFFER_MODE_LEGACY;
3185       genX(cmd_buffer_emit_bt_pool_base_address)(cmd_buffer);
3186    }
3187 
3188    /* We sometimes store vertex data in the dynamic state buffer for blorp
3189     * operations and our dynamic state stream may re-use data from previous
3190     * command buffers.  In order to prevent stale cache data, we flush the VF
3191     * cache.  We could do this on every blorp call but that's not really
3192     * needed as all of the data will get written by the CPU prior to the GPU
3193     * executing anything.  The chances are fairly high that they will use
3194     * blorp at least once per primary command buffer so it shouldn't be
3195     * wasted.
3196     *
3197     * There is also a workaround on gfx8 which requires us to invalidate the
3198     * VF cache occasionally.  It's easier if we can assume we start with a
3199     * fresh cache (See also genX(cmd_buffer_set_binding_for_gfx8_vb_flush).)
3200     */
3201    anv_add_pending_pipe_bits(cmd_buffer,
3202                              ANV_PIPE_VF_CACHE_INVALIDATE_BIT,
3203                              "new cmd buffer");
3204 
3205    /* Invalidate the aux table in every primary command buffer. This ensures
3206     * the command buffer sees the last updates made by the host.
3207     */
3208    if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY &&
3209        cmd_buffer->device->info->has_aux_map) {
3210       anv_add_pending_pipe_bits(cmd_buffer,
3211                                 ANV_PIPE_AUX_TABLE_INVALIDATE_BIT,
3212                                 "new cmd buffer with aux-tt");
3213    }
3214 
3215    /* We send an "Indirect State Pointers Disable" packet at
3216     * EndCommandBuffer, so all push constant packets are ignored during a
3217     * context restore. Documentation says after that command, we need to
3218     * emit push constants again before any rendering operation. So we
3219     * flag them dirty here to make sure they get emitted.
3220     */
3221    cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
3222    cmd_buffer->state.gfx.base.push_constants_data_dirty = true;
3223 
3224    if (cmd_buffer->usage_flags &
3225        VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
3226       struct anv_cmd_graphics_state *gfx = &cmd_buffer->state.gfx;
3227 
3228       char gcbiar_data[VK_GCBIARR_DATA_SIZE(MAX_RTS)];
3229       const VkRenderingInfo *resume_info =
3230          vk_get_command_buffer_inheritance_as_rendering_resume(cmd_buffer->vk.level,
3231                                                                pBeginInfo,
3232                                                                gcbiar_data);
3233       if (resume_info != NULL) {
3234          genX(CmdBeginRendering)(commandBuffer, resume_info);
3235       } else {
3236          const VkCommandBufferInheritanceRenderingInfo *inheritance_info =
3237             vk_get_command_buffer_inheritance_rendering_info(cmd_buffer->vk.level,
3238                                                              pBeginInfo);
3239          assert(inheritance_info);
3240 
3241          gfx->rendering_flags = inheritance_info->flags;
3242          gfx->render_area = (VkRect2D) { };
3243          gfx->layer_count = 0;
3244          gfx->samples = inheritance_info->rasterizationSamples;
3245          gfx->view_mask = inheritance_info->viewMask;
3246 
3247          uint32_t color_att_count = inheritance_info->colorAttachmentCount;
3248          result = anv_cmd_buffer_init_attachments(cmd_buffer, color_att_count);
3249          if (result != VK_SUCCESS)
3250             return result;
3251 
3252          for (uint32_t i = 0; i < color_att_count; i++) {
3253             gfx->color_att[i].vk_format =
3254                inheritance_info->pColorAttachmentFormats[i];
3255          }
3256          gfx->depth_att.vk_format =
3257             inheritance_info->depthAttachmentFormat;
3258          gfx->stencil_att.vk_format =
3259             inheritance_info->stencilAttachmentFormat;
3260 
3261          anv_cmd_graphic_state_update_has_uint_rt(gfx);
3262 
3263          cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_RENDER_AREA |
3264                                         ANV_CMD_DIRTY_RENDER_TARGETS;
3265       }
3266    }
3267 
3268    /* Emit the sample pattern at the beginning of the batch because the
3269     * default locations emitted at the device initialization might have been
3270     * changed by a previous command buffer.
3271     *
3272     * Do not change that when we're continuing a previous renderpass.
3273     */
3274    if (cmd_buffer->device->vk.enabled_extensions.EXT_sample_locations &&
3275        !(cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))
3276       genX(emit_sample_pattern)(&cmd_buffer->batch, NULL);
3277 
3278    if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
3279       const VkCommandBufferInheritanceConditionalRenderingInfoEXT *conditional_rendering_info =
3280          vk_find_struct_const(pBeginInfo->pInheritanceInfo->pNext, COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT);
3281 
3282       /* If the secondary buffer supports conditional rendering,
3283        * we should emit commands as if conditional rendering is enabled.
3284        */
3285       cmd_buffer->state.conditional_render_enabled =
3286          conditional_rendering_info && conditional_rendering_info->conditionalRenderingEnable;
3287 
3288       if (pBeginInfo->pInheritanceInfo->occlusionQueryEnable) {
3289          cmd_buffer->state.gfx.n_occlusion_queries = 1;
3290          cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_OCCLUSION_QUERY_ACTIVE;
3291       }
3292    }
3293 
3294    return VK_SUCCESS;
3295 }
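/* Illustrative application-side usage (a sketch; `color_format` is an
 * assumed variable such as VK_FORMAT_B8G8R8A8_UNORM): a secondary command
 * buffer that continues a dynamic render pass would typically be begun with
 * something like:
 *
 *    VkCommandBufferInheritanceRenderingInfo rendering = {
 *       .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDERING_INFO,
 *       .colorAttachmentCount = 1,
 *       .pColorAttachmentFormats = &color_format,
 *       .rasterizationSamples = VK_SAMPLE_COUNT_1_BIT,
 *    };
 *    VkCommandBufferInheritanceInfo inheritance = {
 *       .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
 *       .pNext = &rendering,
 *    };
 *    VkCommandBufferBeginInfo begin = {
 *       .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
 *       .flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT,
 *       .pInheritanceInfo = &inheritance,
 *    };
 *    vkBeginCommandBuffer(secondary, &begin);
 *
 * which is the path handled by the inheritance_info branch above.
 */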
3296 
3297 /* From the PRM, Volume 2a:
3298  *
3299  *    "Indirect State Pointers Disable
3300  *
3301  *    At the completion of the post-sync operation associated with this pipe
3302  *    control packet, the indirect state pointers in the hardware are
3303  *    considered invalid; the indirect pointers are not saved in the context.
3304  *    If any new indirect state commands are executed in the command stream
3305  *    while the pipe control is pending, the new indirect state commands are
3306  *    preserved.
3307  *
3308  *    [DevIVB+]: Using Invalidate State Pointer (ISP) only inhibits context
3309  *    restoring of Push Constant (3DSTATE_CONSTANT_*) commands. Push Constant
3310  *    commands are only considered as Indirect State Pointers. Once ISP is
3311  *    issued in a context, SW must initialize by programming push constant
3312  *    commands for all the shaders (at least to zero length) before attempting
3313  *    any rendering operation for the same context."
3314  *
3315  * 3DSTATE_CONSTANT_* packets are restored during a context restore,
3316  * even though they point to a BO that has already been unreferenced at
3317  * the end of the previous batch buffer. This has been fine so far since
3318  * we are protected by the scratch page (every address not covered by
3319  * a BO should be pointing to the scratch page). But on CNL, it is
3320  * causing a GPU hang during context restore at the 3DSTATE_CONSTANT_*
3321  * instruction.
3322  *
3323  * The flag "Indirect State Pointers Disable" in PIPE_CONTROL tells the
3324  * hardware to ignore previous 3DSTATE_CONSTANT_* packets during a
3325  * context restore, so the mentioned hang doesn't happen. However,
3326  * software must program push constant commands for all stages prior to
3327  * rendering anything. So we flag them dirty in BeginCommandBuffer.
3328  *
3329  * Finally, we also stall at the pixel scoreboard to make sure the constants
3330  * have been loaded into the EUs before we disable the push constants, so
3331  * that doing so doesn't hang a previously issued 3DPRIMITIVE.
3332  */
3333 static void
3334 emit_isp_disable(struct anv_cmd_buffer *cmd_buffer)
3335 {
3336    genx_batch_emit_pipe_control(&cmd_buffer->batch,
3337                                 cmd_buffer->device->info,
3338                                 cmd_buffer->state.current_pipeline,
3339                                 ANV_PIPE_CS_STALL_BIT |
3340                                 ANV_PIPE_STALL_AT_SCOREBOARD_BIT);
3341    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
3342          pc.IndirectStatePointersDisable = true;
3343          pc.CommandStreamerStallEnable = true;
3344          anv_debug_dump_pc(pc, __func__);
3345    }
3346 }
3347 
3348 static VkResult
3349 end_command_buffer(struct anv_cmd_buffer *cmd_buffer)
3350 {
3351    if (anv_batch_has_error(&cmd_buffer->batch))
3352       return cmd_buffer->batch.status;
3353 
3354    anv_measure_endcommandbuffer(cmd_buffer);
3355 
3356    if (anv_cmd_buffer_is_video_queue(cmd_buffer) ||
3357        anv_cmd_buffer_is_blitter_queue(cmd_buffer)) {
3358       trace_intel_end_cmd_buffer(&cmd_buffer->trace, cmd_buffer->vk.level);
3359       genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
3360       anv_cmd_buffer_end_batch_buffer(cmd_buffer);
3361       return VK_SUCCESS;
3362    }
3363 
3364    /* Flush query clears using blorp so that secondary query writes do not
3365     * race with the clear.
3366     */
3367    if (cmd_buffer->state.queries.clear_bits) {
3368       anv_add_pending_pipe_bits(cmd_buffer,
3369                                 ANV_PIPE_QUERY_BITS(cmd_buffer->state.queries.clear_bits),
3370                                 "query clear flush prior command buffer end");
3371    }
3372 
3373    /* Flush any in-progress CCS/MCS operations in preparation for chaining. */
3374    genX(cmd_buffer_update_color_aux_op(cmd_buffer, ISL_AUX_OP_NONE));
3375 
3376    genX(cmd_buffer_flush_generated_draws)(cmd_buffer);
3377 
3378    /* Turn object level preemption back on if it was disabled, so that it is
3379     * in a known state at the beginning of the next command buffer.
3380     */
3381    if (!cmd_buffer->state.gfx.object_preemption)
3382       genX(cmd_buffer_set_preemption)(cmd_buffer, true);
3383 
3384    /* We want every command buffer to start with the PMA fix in a known state,
3385     * so we disable it at the end of the command buffer.
3386     */
3387    genX(cmd_buffer_enable_pma_fix)(cmd_buffer, false);
3388 
3389    /* Wa_14015814527
3390     *
3391     * Apply task URB workaround in the end of primary or secondary cmd_buffer.
3392     */
3393    genX(apply_task_urb_workaround)(cmd_buffer);
3394 
3395    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
3396 
3397    emit_isp_disable(cmd_buffer);
3398 
3399 #if GFX_VER >= 12
3400    if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY &&
3401        cmd_buffer->vk.pool->flags & VK_COMMAND_POOL_CREATE_PROTECTED_BIT)
3402       genX(cmd_buffer_set_protected_memory)(cmd_buffer, false);
3403 #endif
3404 
3405    trace_intel_end_cmd_buffer(&cmd_buffer->trace, cmd_buffer->vk.level);
3406 
3407    anv_cmd_buffer_end_batch_buffer(cmd_buffer);
3408 
3409    return VK_SUCCESS;
3410 }
3411 
3412 VkResult
3413 genX(EndCommandBuffer)(
3414     VkCommandBuffer                             commandBuffer)
3415 {
3416    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
3417 
3418    VkResult status = end_command_buffer(cmd_buffer);
3419    if (status != VK_SUCCESS)
3420       return status;
3421 
3422    /* If there was MSAA access on the compute/transfer queue, a companion RCS
3423     * command buffer was created; end it properly as well.
3424     */
3425    if (cmd_buffer->companion_rcs_cmd_buffer) {
3426        assert(anv_cmd_buffer_is_compute_queue(cmd_buffer) ||
3427               anv_cmd_buffer_is_blitter_queue(cmd_buffer));
3428        status = end_command_buffer(cmd_buffer->companion_rcs_cmd_buffer);
3429    }
3430 
3431    ANV_RMV(cmd_buffer_create, cmd_buffer->device, cmd_buffer);
3432 
3433    return status;
3434 }
3435 
3436 void
3437 genX(CmdExecuteCommands)(
3438     VkCommandBuffer                             commandBuffer,
3439     uint32_t                                    commandBufferCount,
3440     const VkCommandBuffer*                      pCmdBuffers)
3441 {
3442    ANV_FROM_HANDLE(anv_cmd_buffer, container, commandBuffer);
3443 
3444    struct anv_device *device = container->device;
3445 
3446    if (anv_batch_has_error(&container->batch))
3447       return;
3448 
3449    /* The secondary command buffers will assume that the PMA fix is disabled
3450     * when they begin executing.  Make sure this is true.
3451     */
3452    genX(cmd_buffer_enable_pma_fix)(container, false);
3453 
3454    /* Turn on preemption in case it was toggled off. */
3455    if (!container->state.gfx.object_preemption)
3456       genX(cmd_buffer_set_preemption)(container, true);
3457 
3458    /* Wa_14015814527
3459     *
3460     * Apply task URB workaround before secondary cmd buffers.
3461     */
3462    genX(apply_task_urb_workaround)(container);
3463 
3464    /* Flush query clears using blorp so that secondary query writes do not
3465     * race with the clear.
3466     */
3467    if (container->state.queries.clear_bits) {
3468       anv_add_pending_pipe_bits(container,
3469                                 ANV_PIPE_QUERY_BITS(container->state.queries.clear_bits),
3470                                 "query clear flush prior to secondary buffer");
3471    }
3472 
3473    /* Ensure we're in a regular drawing cache mode (an assumption made by all
3474     * secondaries).
3475     */
3476    genX(cmd_buffer_update_color_aux_op(container, ISL_AUX_OP_NONE));
3477 
3478    /* The secondary command buffer doesn't know which textures etc. have been
3479     * flushed prior to their execution.  Apply those flushes now.
3480     */
3481    genX(cmd_buffer_apply_pipe_flushes)(container);
3482 
3483    genX(cmd_buffer_flush_generated_draws)(container);
3484 
3485    UNUSED enum anv_cmd_descriptor_buffer_mode db_mode =
3486       container->state.current_db_mode;
3487 
3488    /* Do a first pass to copy the surface state content of the render targets
3489     * if needed.
3490     */
3491    bool need_surface_state_copy = false;
3492    for (uint32_t i = 0; i < commandBufferCount; i++) {
3493       ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);
3494 
3495       if (secondary->usage_flags &
3496           VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
3497          need_surface_state_copy = true;
3498          break;
3499       }
3500    }
3501 
3502    if (need_surface_state_copy) {
3503       if (container->vk.pool->flags & VK_COMMAND_POOL_CREATE_PROTECTED_BIT)
3504          genX(cmd_buffer_set_protected_memory)(container, false);
3505 
3506       /* The memcpy will take care of the 3D preemption requirements. */
3507       struct anv_memcpy_state memcpy_state;
3508       genX(emit_so_memcpy_init)(&memcpy_state, device,
3509                                 container, &container->batch);
3510 
3511       for (uint32_t i = 0; i < commandBufferCount; i++) {
3512          ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);
3513 
3514          assert(secondary->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
3515          assert(!anv_batch_has_error(&secondary->batch));
3516 
3517          if (secondary->usage_flags &
3518              VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
3519             /* If we're continuing a render pass from the container, we need
3520              * to copy the surface states for the current subpass into the
3521              * storage we allocated for them in BeginCommandBuffer.
3522              */
3523             struct anv_state src_state = container->state.gfx.att_states;
3524             struct anv_state dst_state = secondary->state.gfx.att_states;
3525             assert(src_state.alloc_size == dst_state.alloc_size);
3526 
3527             genX(emit_so_memcpy)(
3528                &memcpy_state,
3529                anv_state_pool_state_address(&device->internal_surface_state_pool,
3530                                             dst_state),
3531                anv_state_pool_state_address(&device->internal_surface_state_pool,
3532                                             src_state),
3533                src_state.alloc_size);
3534          }
3535       }
3536       genX(emit_so_memcpy_fini)(&memcpy_state);
3537 
3538       anv_add_pending_pipe_bits(container,
3539                                 ANV_PIPE_CS_STALL_BIT | ANV_PIPE_STALL_AT_SCOREBOARD_BIT,
3540                                 "Wait for primary->secondary RP surface state copies");
3541       genX(cmd_buffer_apply_pipe_flushes)(container);
3542 
3543       if (container->vk.pool->flags & VK_COMMAND_POOL_CREATE_PROTECTED_BIT)
3544          genX(cmd_buffer_set_protected_memory)(container, true);
3545    }
3546 
3547    /* Ensure preemption is enabled (an assumption made by all secondaries). */
3548    genX(cmd_buffer_set_preemption)(container, true);
3549 
3550    for (uint32_t i = 0; i < commandBufferCount; i++) {
3551       ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);
3552 
3553       assert(secondary->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
3554       assert(!anv_batch_has_error(&secondary->batch));
3555 
3556       if (secondary->state.conditional_render_enabled) {
3557          if (!container->state.conditional_render_enabled) {
3558             /* Secondary buffer is constructed as if it will be executed
3559              * with conditional rendering, so we should satisfy this dependency
3560              * regardless of conditional rendering being enabled in container.
3561              */
3562             struct mi_builder b;
3563             mi_builder_init(&b, device->info, &container->batch);
3564             mi_store(&b, mi_reg64(ANV_PREDICATE_RESULT_REG),
3565                          mi_imm(UINT64_MAX));
3566          }
3567       }
3568 
3569       anv_cmd_buffer_add_secondary(container, secondary);
3570 
3571       /* Add secondary buffer's RCS command buffer to container buffer's RCS
3572        * command buffer for execution if secondary RCS is valid.
3573        */
3574       if (secondary->companion_rcs_cmd_buffer != NULL) {
3575          VkResult result = anv_cmd_buffer_ensure_rcs_companion(container);
3576          if (result != VK_SUCCESS) {
3577             anv_batch_set_error(&container->batch, result);
3578             return;
3579          }
3580 
3581          anv_cmd_buffer_add_secondary(container->companion_rcs_cmd_buffer,
3582                                       secondary->companion_rcs_cmd_buffer);
3583       }
3584 
3585       assert(secondary->perf_query_pool == NULL || container->perf_query_pool == NULL ||
3586              secondary->perf_query_pool == container->perf_query_pool);
3587       if (secondary->perf_query_pool)
3588          container->perf_query_pool = secondary->perf_query_pool;
3589 
3590 #if INTEL_NEEDS_WA_1808121037
3591       if (secondary->state.gfx.depth_reg_mode != ANV_DEPTH_REG_MODE_UNKNOWN)
3592          container->state.gfx.depth_reg_mode = secondary->state.gfx.depth_reg_mode;
3593 #endif
3594 
3595       container->state.gfx.viewport_set |= secondary->state.gfx.viewport_set;
3596 
3597       db_mode = secondary->state.current_db_mode;
3598    }
3599 
3600    /* The secondary isn't counted in our VF cache tracking so we need to
3601     * invalidate the whole thing.
3602     */
3603    if (GFX_VER == 9) {
3604       anv_add_pending_pipe_bits(container,
3605                                 ANV_PIPE_CS_STALL_BIT | ANV_PIPE_VF_CACHE_INVALIDATE_BIT,
3606                                 "Secondary cmd buffer not tracked in VF cache");
3607    }
3608 
3609 #if INTEL_WA_16014538804_GFX_VER
3610    if (anv_cmd_buffer_is_render_queue(container) &&
3611        intel_needs_workaround(device->info, 16014538804))
3612       anv_batch_emit(&container->batch, GENX(PIPE_CONTROL), pc);
3613 #endif
3614 
3615    /* The secondary may have selected a different pipeline (3D or compute) and
3616     * may have changed the current L3$ configuration.  Reset our tracking
3617     * variables to invalid values to ensure that we re-emit these in the case
3618     * where we do any draws or compute dispatches from the container after the
3619     * secondary has returned.
3620     */
3621    container->state.current_pipeline = UINT32_MAX;
3622    container->state.current_l3_config = NULL;
3623    container->state.current_hash_scale = 0;
3624    container->state.gfx.push_constant_stages = 0;
3625 
3626    memset(&container->state.gfx.urb_cfg, 0, sizeof(struct intel_urb_config));
3627 
3628    /* Reemit all GFX instructions in container */
3629    memcpy(container->state.gfx.dyn_state.dirty,
3630           device->gfx_dirty_state,
3631           sizeof(container->state.gfx.dyn_state.dirty));
3632    if (container->device->vk.enabled_extensions.KHR_fragment_shading_rate) {
3633       /* Also recompute the CPS_STATE offset */
3634       struct vk_dynamic_graphics_state *dyn =
3635          &container->vk.dynamic_graphics_state;
3636       BITSET_SET(dyn->dirty, MESA_VK_DYNAMIC_FSR);
3637    }
3638 
3639    /* Each of the secondary command buffers will use its own state base
3640     * address.  We need to re-emit state base address for the container after
3641     * all of the secondaries are done.
3642     */
3643    if (container->device->vk.enabled_extensions.EXT_descriptor_buffer) {
3644 #if GFX_VERx10 >= 125
3645       /* If the last secondary had a different mode, reemit the last pending
3646        * mode. Otherwise, we can do a lighter binding table pool update.
3647        */
3648       if (db_mode != container->state.current_db_mode) {
3649          container->state.current_db_mode = db_mode;
3650          genX(cmd_buffer_emit_state_base_address)(container);
3651       } else {
3652          genX(cmd_buffer_emit_bt_pool_base_address)(container);
3653       }
3654 #else
3655       genX(cmd_buffer_emit_state_base_address)(container);
3656 #endif
3657    } else {
3658       genX(cmd_buffer_emit_bt_pool_base_address)(container);
3659    }
3660 
3661    /* Copy utrace timestamp buffers from the secondaries into the container. */
3662    if (u_trace_enabled(&device->ds.trace_context)) {
3663       trace_intel_begin_trace_copy(&container->trace);
3664 
3665       struct anv_memcpy_state memcpy_state;
3666       genX(emit_so_memcpy_init)(&memcpy_state, device,
3667                                 container, &container->batch);
3668       uint32_t num_traces = 0;
3669       for (uint32_t i = 0; i < commandBufferCount; i++) {
3670          ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);
3671 
3672          num_traces += secondary->trace.num_traces;
3673          u_trace_clone_append(u_trace_begin_iterator(&secondary->trace),
3674                               u_trace_end_iterator(&secondary->trace),
3675                               &container->trace,
3676                               &memcpy_state,
3677                               anv_device_utrace_emit_gfx_copy_buffer);
3678       }
3679       genX(emit_so_memcpy_fini)(&memcpy_state);
3680 
3681       trace_intel_end_trace_copy(&container->trace, num_traces);
3682 
3683       /* Memcpy is done using the 3D pipeline. */
3684       container->state.current_pipeline = _3D;
3685    }
3686 }
3687 
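/* Translate a barrier's source access mask into the pipe flush bits
 * required to make writes done through those access paths visible to
 * subsequent work.
 */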
3688 static inline enum anv_pipe_bits
3689 anv_pipe_flush_bits_for_access_flags(struct anv_cmd_buffer *cmd_buffer,
3690                                      VkAccessFlags2 flags)
3691 {
3692    enum anv_pipe_bits pipe_bits = 0;
3693 
3694    u_foreach_bit64(b, flags) {
3695       switch ((VkAccessFlags2)BITFIELD64_BIT(b)) {
3696       case VK_ACCESS_2_SHADER_WRITE_BIT:
3697       case VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT:
3698       case VK_ACCESS_2_ACCELERATION_STRUCTURE_WRITE_BIT_KHR:
3699          /* We're transitioning a buffer that was previously used as write
3700           * destination through the data port. To make its content available
3701           * to future operations, flush the hdc pipeline.
3702           */
3703          pipe_bits |= ANV_PIPE_HDC_PIPELINE_FLUSH_BIT;
3704          pipe_bits |= ANV_PIPE_UNTYPED_DATAPORT_CACHE_FLUSH_BIT;
3705          break;
3706       case VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT:
3707          /* We're transitioning a buffer that was previously used as render
3708           * target. To make its content available to future operations, flush
3709           * the render target cache.
3710           */
3711          pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
3712          break;
3713       case VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
3714          /* We're transitioning a buffer that was previously used as depth
3715           * buffer. To make its content available to future operations, flush
3716           * the depth cache.
3717           */
3718          pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
3719          break;
3720       case VK_ACCESS_2_TRANSFER_WRITE_BIT:
3721          /* We're transitioning a buffer that was previously used as a
3722           * transfer write destination. Generic write operations include color
3723           * & depth operations as well as buffer operations like:
3724           *     - vkCmdClearColorImage()
3725           *     - vkCmdClearDepthStencilImage()
3726           *     - vkCmdBlitImage()
3727           *     - vkCmdCopy*(), vkCmdUpdate*(), vkCmdFill*()
3728           *
3729           * Most of these operations are implemented using Blorp which writes
3730           * through the render target cache or the depth cache on the graphics
3731           * queue. On the compute queue, the writes are done through the data
3732           * port.
3733           */
3734          if (anv_cmd_buffer_is_compute_queue(cmd_buffer)) {
3735             pipe_bits |= ANV_PIPE_HDC_PIPELINE_FLUSH_BIT;
3736             pipe_bits |= ANV_PIPE_UNTYPED_DATAPORT_CACHE_FLUSH_BIT;
3737          } else {
3738             /* We can use the data port when trying to stay in compute mode on
3739              * the RCS.
3740              */
3741             pipe_bits |= ANV_PIPE_HDC_PIPELINE_FLUSH_BIT;
3742             pipe_bits |= ANV_PIPE_UNTYPED_DATAPORT_CACHE_FLUSH_BIT;
3743             /* Most operations are done through RT/depth writes */
3744             pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
3745             pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
3746          }
3747          break;
3748       case VK_ACCESS_2_MEMORY_WRITE_BIT:
3749          /* We're transitioning a buffer for generic write operations. Flush
3750           * all the caches.
3751           */
3752          pipe_bits |= ANV_PIPE_BARRIER_FLUSH_BITS;
3753          break;
3754       case VK_ACCESS_2_HOST_WRITE_BIT:
3755          /* We're transitioning a buffer for access by CPU. Invalidate
3756           * all the caches. Since data and tile caches don't have invalidate,
3757           * we are forced to flush those as well.
3758           */
3759          pipe_bits |= ANV_PIPE_BARRIER_FLUSH_BITS;
3760          pipe_bits |= ANV_PIPE_INVALIDATE_BITS;
3761          break;
3762       case VK_ACCESS_2_TRANSFORM_FEEDBACK_WRITE_BIT_EXT:
3763       case VK_ACCESS_2_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT:
3764          /* We're transitioning a buffer written either from VS stage or from
3765           * the command streamer (see CmdEndTransformFeedbackEXT), we just
3766           * need to stall the CS.
3767           *
3768           * Streamout writes apparently bypass L3, so in order to make them
3769           * visible to the destination, we need to invalidate the other
3770           * caches.
3771           */
3772          pipe_bits |= ANV_PIPE_CS_STALL_BIT | ANV_PIPE_INVALIDATE_BITS;
3773          break;
3774       default:
3775          break; /* Nothing to do */
3776       }
3777    }
3778 
3779    return pipe_bits;
3780 }
3781 
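/* Translate a barrier's destination access mask into the pipe
 * invalidation bits (plus the flushes/stalls needed for non-coherent
 * command streamer reads) required before the destination can consume
 * the data.
 */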
3782 static inline enum anv_pipe_bits
3783 anv_pipe_invalidate_bits_for_access_flags(struct anv_cmd_buffer *cmd_buffer,
3784                                           VkAccessFlags2 flags)
3785 {
3786    struct anv_device *device = cmd_buffer->device;
3787    enum anv_pipe_bits pipe_bits = 0;
3788 
3789    u_foreach_bit64(b, flags) {
3790       switch ((VkAccessFlags2)BITFIELD64_BIT(b)) {
3791       case VK_ACCESS_2_INDIRECT_COMMAND_READ_BIT:
3792          /* Indirect draw commands take a buffer as input that we're going to
3793           * read from the command streamer to load some of the HW registers
3794           * (see genX_cmd_buffer.c:load_indirect_parameters). This requires a
3795           * command streamer stall so that all the cache flushes have
3796           * completed before the command streamer loads from memory.
3797           */
3798          pipe_bits |=  ANV_PIPE_CS_STALL_BIT;
3799          if (device->info->ver == 9) {
3800             /* Indirect draw commands on Gfx9 also set gl_BaseVertex &
3801              * gl_BaseInstance through a vertex buffer, so invalidate that cache.
3802              */
3803             pipe_bits |= ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
3804          }
3805          /* For CmdDispatchIndirect, we load indirect gl_NumWorkGroups through
3806           * an A64 message, so we need to invalidate constant cache.
3807           */
3808          pipe_bits |= ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
3809          /* Tile & Data cache flush needed for Cmd*Indirect* commands since
3810           * the command streamer is not L3 coherent.
3811           */
3812          pipe_bits |= ANV_PIPE_TILE_CACHE_FLUSH_BIT |
3813                       ANV_PIPE_DATA_CACHE_FLUSH_BIT;
3814          break;
3815       case VK_ACCESS_2_INDEX_READ_BIT:
3816       case VK_ACCESS_2_VERTEX_ATTRIBUTE_READ_BIT:
3817          /* We're transitioning a buffer to be used as input for vkCmdDraw*
3818           * commands, so we invalidate the VF cache to make sure there is no
3819           * stale data when we start rendering.
3820           */
3821          pipe_bits |= ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
3822          break;
3823       case VK_ACCESS_2_UNIFORM_READ_BIT:
3824       case VK_ACCESS_2_SHADER_BINDING_TABLE_READ_BIT_KHR:
3825          /* We're transitioning a buffer to be used as uniform data. Because
3826           * uniforms are accessed through the data port & sampler, we need to
3827           * invalidate the texture cache (sampler) & constant cache (data
3828           * port) to avoid stale data.
3829           */
3830          pipe_bits |= ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
3831          if (device->physical->compiler->indirect_ubos_use_sampler) {
3832             pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
3833          } else {
3834             pipe_bits |= ANV_PIPE_HDC_PIPELINE_FLUSH_BIT;
3835             pipe_bits |= ANV_PIPE_UNTYPED_DATAPORT_CACHE_FLUSH_BIT;
3836          }
3837          break;
3838       case VK_ACCESS_2_INPUT_ATTACHMENT_READ_BIT:
3839       case VK_ACCESS_2_TRANSFER_READ_BIT:
3840       case VK_ACCESS_2_SHADER_SAMPLED_READ_BIT:
3841          /* Transitioning a buffer to be read through the sampler, so
3842           * invalidate the texture cache; we don't want any stale data.
3843           */
3844          pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
3845          break;
3846       case VK_ACCESS_2_SHADER_READ_BIT:
3847          /* Same as VK_ACCESS_2_UNIFORM_READ_BIT and
3848           * VK_ACCESS_2_SHADER_SAMPLED_READ_BIT cases above
3849           */
3850          pipe_bits |= ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT |
3851                       ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
3852          if (!device->physical->compiler->indirect_ubos_use_sampler) {
3853             pipe_bits |= ANV_PIPE_HDC_PIPELINE_FLUSH_BIT;
3854             pipe_bits |= ANV_PIPE_UNTYPED_DATAPORT_CACHE_FLUSH_BIT;
3855          }
3856          break;
3857       case VK_ACCESS_2_MEMORY_READ_BIT:
3858          /* Transitioning a buffer for generic read, invalidate all the
3859           * caches.
3860           */
3861          pipe_bits |= ANV_PIPE_INVALIDATE_BITS;
3862          break;
3863       case VK_ACCESS_2_MEMORY_WRITE_BIT:
3864          /* Generic write, make sure all previously written things land in
3865           * memory.
3866           */
3867          pipe_bits |= ANV_PIPE_BARRIER_FLUSH_BITS;
3868          break;
3869       case VK_ACCESS_2_CONDITIONAL_RENDERING_READ_BIT_EXT:
3870       case VK_ACCESS_2_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT:
3871          /* Transitioning a buffer for conditional rendering or transform
3872           * feedback. We'll load the content of this buffer into HW registers
3873           * using the command streamer, so we need to stall the command
3874           * streamer to make sure any in-flight flush operations have
3875           * completed.
3876           */
3877          pipe_bits |= ANV_PIPE_CS_STALL_BIT;
3878          pipe_bits |= ANV_PIPE_TILE_CACHE_FLUSH_BIT;
3879          pipe_bits |= ANV_PIPE_DATA_CACHE_FLUSH_BIT;
3880          break;
3881       case VK_ACCESS_2_HOST_READ_BIT:
3882          /* We're transitioning a buffer that was written by CPU.  Flush
3883           * all the caches.
3884           */
3885          pipe_bits |= ANV_PIPE_BARRIER_FLUSH_BITS;
3886          break;
3887       case VK_ACCESS_2_TRANSFORM_FEEDBACK_WRITE_BIT_EXT:
3888          /* We're transitioning a buffer to be written by the streamout fixed
3889           * function. This one is apparently not L3 coherent, so we need a
3890           * tile cache flush to make sure any previous write is not going to
3891           * create WaW hazards.
3892           */
3893          pipe_bits |= ANV_PIPE_DATA_CACHE_FLUSH_BIT;
3894          pipe_bits |= ANV_PIPE_TILE_CACHE_FLUSH_BIT;
3895          break;
3896       case VK_ACCESS_2_SHADER_STORAGE_READ_BIT:
3897          /* VK_ACCESS_2_SHADER_STORAGE_READ_BIT specifies read access to a
3898           * storage buffer, physical storage buffer, storage texel buffer, or
3899           * storage image in any shader pipeline stage.
3900           *
3901           * Any storage buffers or images written to must be invalidated and
3902           * flushed before the shader can access them.
3903           *
3904           * Both HDC & Untyped flushes also do invalidation. This is why we
3905           * use this here on Gfx12+.
3906           *
3907           * Gfx11 and prior don't have HDC. Only Data cache flush is available
3908           * and it only operates on the written cache lines.
3909           */
3910          if (device->info->ver >= 12) {
3911             pipe_bits |= ANV_PIPE_UNTYPED_DATAPORT_CACHE_FLUSH_BIT;
3912             pipe_bits |= ANV_PIPE_HDC_PIPELINE_FLUSH_BIT;
3913          }
3914          break;
3915       case VK_ACCESS_2_DESCRIPTOR_BUFFER_READ_BIT_EXT:
3916          pipe_bits |= ANV_PIPE_STATE_CACHE_INVALIDATE_BIT;
3917          break;
3918       default:
3919          break; /* Nothing to do */
3920       }
3921    }
3922 
3923    return pipe_bits;
3924 }
3925 
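/* Helpers classifying the stage & access masks of a barrier. Note that
 * VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT (and, for shaders,
 * ALL_GRAPHICS_BIT) matches every category.
 */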
3926 static inline bool
3927 stage_is_shader(const VkPipelineStageFlags2 stage)
3928 {
3929    return (stage & (VK_PIPELINE_STAGE_2_VERTEX_SHADER_BIT |
3930                     VK_PIPELINE_STAGE_2_TESSELLATION_CONTROL_SHADER_BIT |
3931                     VK_PIPELINE_STAGE_2_TESSELLATION_EVALUATION_SHADER_BIT |
3932                     VK_PIPELINE_STAGE_2_GEOMETRY_SHADER_BIT |
3933                     VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT |
3934                     VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT |
3935                     VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT |
3936                     VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT |
3937                     VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_KHR |
3938                     VK_PIPELINE_STAGE_2_TASK_SHADER_BIT_EXT |
3939                     VK_PIPELINE_STAGE_2_MESH_SHADER_BIT_EXT));
3940 }
3941 
3942 static inline bool
3943 stage_is_transfer(const VkPipelineStageFlags2 stage)
3944 {
3945    return (stage & (VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT |
3946                     VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT));
3947 }
3948 
3949 static inline bool
3950 stage_is_video(const VkPipelineStageFlags2 stage)
3951 {
3952    return (stage & (VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT |
3953 #ifdef VK_ENABLE_BETA_EXTENSIONS
3954                     VK_PIPELINE_STAGE_2_VIDEO_ENCODE_BIT_KHR |
3955 #endif
3956                     VK_PIPELINE_STAGE_2_VIDEO_DECODE_BIT_KHR));
3957 }
3958 
3959 static inline bool
3960 mask_is_shader_write(const VkAccessFlags2 access)
3961 {
3962    return (access & (VK_ACCESS_2_SHADER_WRITE_BIT |
3963                      VK_ACCESS_2_MEMORY_WRITE_BIT |
3964                      VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT));
3965 }
3966 
3967 static inline bool
3968 mask_is_write(const VkAccessFlags2 access)
3969 {
3970    return access & (VK_ACCESS_2_SHADER_WRITE_BIT |
3971                     VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT |
3972                     VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
3973                     VK_ACCESS_2_TRANSFER_WRITE_BIT |
3974                     VK_ACCESS_2_HOST_WRITE_BIT |
3975                     VK_ACCESS_2_MEMORY_WRITE_BIT |
3976                     VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT |
3977                     VK_ACCESS_2_VIDEO_DECODE_WRITE_BIT_KHR |
3978 #ifdef VK_ENABLE_BETA_EXTENSIONS
3979                     VK_ACCESS_2_VIDEO_ENCODE_WRITE_BIT_KHR |
3980 #endif
3981                     VK_ACCESS_2_TRANSFORM_FEEDBACK_WRITE_BIT_EXT |
3982                     VK_ACCESS_2_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT |
3983                     VK_ACCESS_2_COMMAND_PREPROCESS_WRITE_BIT_NV |
3984                     VK_ACCESS_2_ACCELERATION_STRUCTURE_WRITE_BIT_KHR |
3985                     VK_ACCESS_2_MICROMAP_WRITE_BIT_EXT |
3986                     VK_ACCESS_2_OPTICAL_FLOW_WRITE_BIT_NV);
3987 }
3988 
3989 static inline bool
3990 mask_is_transfer_write(const VkAccessFlags2 access)
3991 {
3992    return access & (VK_ACCESS_2_TRANSFER_WRITE_BIT |
3993                     VK_ACCESS_2_MEMORY_WRITE_BIT);
3994 }
3995 
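/* Barriers on the video queue are implemented with a single MI_FLUSH_DW:
 * the LLC is flushed when video writes become visible to non-video
 * stages or on a queue family ownership transfer, and the CCS is flushed
 * when an aux-compressed image is involved.
 */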
3996 static void
3997 cmd_buffer_barrier_video(struct anv_cmd_buffer *cmd_buffer,
3998                          uint32_t n_dep_infos,
3999                          const VkDependencyInfo *dep_infos)
4000 {
4001    assert(anv_cmd_buffer_is_video_queue(cmd_buffer));
4002 
4003    bool flush_llc = false;
4004    bool flush_ccs = false;
4005 
4006    for (uint32_t d = 0; d < n_dep_infos; d++) {
4007       const VkDependencyInfo *dep_info = &dep_infos[d];
4008 
4009 
4010       for (uint32_t i = 0; i < dep_info->imageMemoryBarrierCount; i++) {
4011          const VkImageMemoryBarrier2 *img_barrier =
4012             &dep_info->pImageMemoryBarriers[i];
4013 
4014          ANV_FROM_HANDLE(anv_image, image, img_barrier->image);
4015          const VkImageSubresourceRange *range = &img_barrier->subresourceRange;
4016 
4017          /* If srcQueueFamilyIndex is not equal to dstQueueFamilyIndex, this
4018           * memory barrier defines a queue family ownership transfer.
4019           */
4020          if (img_barrier->srcQueueFamilyIndex != img_barrier->dstQueueFamilyIndex)
4021             flush_llc = true;
4022 
4023          VkImageAspectFlags img_aspects =
4024             vk_image_expand_aspect_mask(&image->vk, range->aspectMask);
4025          anv_foreach_image_aspect_bit(aspect_bit, image, img_aspects) {
4026             const uint32_t plane =
4027                anv_image_aspect_to_plane(image, 1UL << aspect_bit);
4028             if (isl_aux_usage_has_ccs(image->planes[plane].aux_usage)) {
4029                flush_ccs = true;
4030             }
4031          }
4032       }
4033 
4034       for (uint32_t i = 0; i < dep_info->bufferMemoryBarrierCount; i++) {
4035          /* Flush the cache if something written by the video operations is
4036           * used by any stage other than the video encode/decode stages, or if
4037           * srcQueueFamilyIndex is not equal to dstQueueFamilyIndex (in which
4038           * case this memory barrier defines a queue family ownership transfer).
4039           */
4040          if ((stage_is_video(dep_info->pBufferMemoryBarriers[i].srcStageMask) &&
4041               mask_is_write(dep_info->pBufferMemoryBarriers[i].srcAccessMask) &&
4042               !stage_is_video(dep_info->pBufferMemoryBarriers[i].dstStageMask)) ||
4043              (dep_info->pBufferMemoryBarriers[i].srcQueueFamilyIndex !=
4044               dep_info->pBufferMemoryBarriers[i].dstQueueFamilyIndex)) {
4045             flush_llc = true;
4046             break;
4047          }
4048       }
4049 
4050       for (uint32_t i = 0; i < dep_info->memoryBarrierCount; i++) {
4051          /* Flush the cache if something written by the video operations is
4052           * used by any stage other than the video encode/decode stages.
4053           */
4054          if (stage_is_video(dep_info->pMemoryBarriers[i].srcStageMask) &&
4055              mask_is_write(dep_info->pMemoryBarriers[i].srcAccessMask) &&
4056              !stage_is_video(dep_info->pMemoryBarriers[i].dstStageMask)) {
4057             flush_llc = true;
4058             break;
4059          }
4060       }
4061 
4062       /* We cannot gather more information than that. */
4063       if (flush_ccs && flush_llc)
4064          break;
4065    }
4066 
4067    if (flush_ccs || flush_llc) {
4068       anv_batch_emit(&cmd_buffer->batch, GENX(MI_FLUSH_DW), fd) {
4069 #if GFX_VERx10 >= 125
4070          fd.FlushCCS = flush_ccs;
4071 #endif
4072 #if GFX_VER >= 12
4073          /* Using this bit on Gfx9 triggers a GPU hang.
4074           * This is undocumented behavior. Gfx12 seems fine.
4075           * TODO: check Gfx11
4076           */
4077          fd.FlushLLC = flush_llc;
4078 #endif
4079       }
4080    }
4081 }
4082 
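/* Same idea on the blitter (copy) queue: scan the dependency infos and
 * emit a single MI_FLUSH_DW when transfer writes become visible to other
 * work, when a transfer destination is reused as a source, or on a queue
 * family ownership transfer.
 */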
4083 static void
4084 cmd_buffer_barrier_blitter(struct anv_cmd_buffer *cmd_buffer,
4085                            uint32_t n_dep_infos,
4086                            const VkDependencyInfo *dep_infos)
4087 {
4088 #if GFX_VERx10 >= 125
4089    assert(anv_cmd_buffer_is_blitter_queue(cmd_buffer));
4090 
4091    /* The blitter requires an MI_FLUSH_DW command when a buffer transitions
4092     * from being a destination to a source.
4093     */
4094    bool flush_llc = false;
4095    bool flush_ccs = false;
4096 
4097    for (uint32_t d = 0; d < n_dep_infos; d++) {
4098       const VkDependencyInfo *dep_info = &dep_infos[d];
4099 
4100       for (uint32_t i = 0; i < dep_info->imageMemoryBarrierCount; i++) {
4101          const VkImageMemoryBarrier2 *img_barrier =
4102             &dep_info->pImageMemoryBarriers[i];
4103 
4104          ANV_FROM_HANDLE(anv_image, image, img_barrier->image);
4105          const VkImageSubresourceRange *range = &img_barrier->subresourceRange;
4106 
4107          /* If srcQueueFamilyIndex is not equal to dstQueueFamilyIndex, this
4108           * memory barrier defines a queue family transfer operation.
4109           */
4110          if (img_barrier->srcQueueFamilyIndex != img_barrier->dstQueueFamilyIndex)
4111             flush_llc = true;
4112 
4113          /* Flush the cache if a transfer command reads the output of the
4114           * previous transfer command. Ideally we should just wait for
4115           * completion, but for now just flush the cache to make the data visible.
4116           */
4117          if ((img_barrier->oldLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL ||
4118               img_barrier->oldLayout == VK_IMAGE_LAYOUT_GENERAL) &&
4119              (img_barrier->newLayout == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL ||
4120               img_barrier->newLayout == VK_IMAGE_LAYOUT_GENERAL)) {
4121             flush_llc = true;
4122          }
4123 
4124          VkImageAspectFlags img_aspects =
4125             vk_image_expand_aspect_mask(&image->vk, range->aspectMask);
4126          anv_foreach_image_aspect_bit(aspect_bit, image, img_aspects) {
4127             const uint32_t plane =
4128                anv_image_aspect_to_plane(image, 1UL << aspect_bit);
4129             if (isl_aux_usage_has_ccs(image->planes[plane].aux_usage)) {
4130                flush_ccs = true;
4131             }
4132          }
4133       }
4134 
4135       for (uint32_t i = 0; i < dep_info->bufferMemoryBarrierCount; i++) {
4136          /* Flush the cache if something written by the transfer command is
4137           * used by any stage other than the transfer stage, or if
4138           * srcQueueFamilyIndex is not equal to dstQueueFamilyIndex (in which
4139           * case this memory barrier defines a queue family transfer operation).
4140           */
4141          if ((stage_is_transfer(dep_info->pBufferMemoryBarriers[i].srcStageMask) &&
4142               mask_is_write(dep_info->pBufferMemoryBarriers[i].srcAccessMask)) ||
4143              (dep_info->pBufferMemoryBarriers[i].srcQueueFamilyIndex !=
4144               dep_info->pBufferMemoryBarriers[i].dstQueueFamilyIndex)) {
4145             flush_llc = true;
4146             break;
4147          }
4148       }
4149 
4150       for (uint32_t i = 0; i < dep_info->memoryBarrierCount; i++) {
4151          /* Flush the cache if something written by the transfer command is
4152           * used by any stage other than the transfer stage.
4153           */
4154          if (stage_is_transfer(dep_info->pMemoryBarriers[i].srcStageMask) &&
4155              mask_is_write(dep_info->pMemoryBarriers[i].srcAccessMask)) {
4156             flush_llc = true;
4157             break;
4158          }
4159       }
4160 
4161       /* We cannot gather more information than that. */
4162       if (flush_ccs && flush_llc)
4163          break;
4164    }
4165 
4166    if (flush_ccs || flush_llc) {
4167       /* Wa_16018063123 - emit fast color dummy blit before MI_FLUSH_DW. */
4168       if (intel_needs_workaround(cmd_buffer->device->info, 16018063123)) {
4169          genX(batch_emit_fast_color_dummy_blit)(&cmd_buffer->batch,
4170                                                 cmd_buffer->device);
4171       }
4172       anv_batch_emit(&cmd_buffer->batch, GENX(MI_FLUSH_DW), fd) {
4173          fd.FlushCCS = flush_ccs;
4174          fd.FlushLLC = flush_llc;
4175       }
4176    }
4177 #endif
4178 }
4179 
4180 static inline bool
4181 cmd_buffer_has_pending_copy_query(struct anv_cmd_buffer *cmd_buffer)
4182 {
4183    /* Query copies are only written with dataport, so we only need to check
4184     * that flag.
4185     */
4186    return (cmd_buffer->state.queries.buffer_write_bits &
4187            ANV_QUERY_WRITES_DATA_FLUSH) != 0;
4188 }
4189 
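/* Walk every memory/buffer/image barrier in the dependency infos,
 * perform the required image layout transitions, and accumulate the
 * union of source/destination stage masks together with the pipe
 * flush/invalidate/stall bits needed on the render or compute queue.
 */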
4190 static void
4191 cmd_buffer_accumulate_barrier_bits(struct anv_cmd_buffer *cmd_buffer,
4192                                    uint32_t n_dep_infos,
4193                                    const VkDependencyInfo *dep_infos,
4194                                    VkPipelineStageFlags2 *out_src_stages,
4195                                    VkPipelineStageFlags2 *out_dst_stages,
4196                                    enum anv_pipe_bits *out_bits)
4197 {
4198    /* XXX: Right now, we're really dumb and just flush whatever categories
4199     * the app asks for. One of these days we may make this a bit better but
4200     * right now that's all the hardware allows for in most areas.
4201     */
4202    VkAccessFlags2 src_flags = 0;
4203    VkAccessFlags2 dst_flags = 0;
4204 
4205    VkPipelineStageFlags2 src_stages = 0;
4206    VkPipelineStageFlags2 dst_stages = 0;
4207 
4208 #if GFX_VER < 20
4209    bool apply_sparse_flushes = false;
4210    struct anv_device *device = cmd_buffer->device;
4211 #endif
4212    bool flush_query_copies = false;
4213 
4214    for (uint32_t d = 0; d < n_dep_infos; d++) {
4215       const VkDependencyInfo *dep_info = &dep_infos[d];
4216 
4217       for (uint32_t i = 0; i < dep_info->memoryBarrierCount; i++) {
4218          src_flags |= dep_info->pMemoryBarriers[i].srcAccessMask;
4219          dst_flags |= dep_info->pMemoryBarriers[i].dstAccessMask;
4220 
4221          src_stages |= dep_info->pMemoryBarriers[i].srcStageMask;
4222          dst_stages |= dep_info->pMemoryBarriers[i].dstStageMask;
4223 
4224          /* Shader writes to buffers that could then be written by a transfer
4225           * command (including queries).
4226           */
4227          if (stage_is_shader(dep_info->pMemoryBarriers[i].srcStageMask) &&
4228              mask_is_shader_write(dep_info->pMemoryBarriers[i].srcAccessMask) &&
4229              stage_is_transfer(dep_info->pMemoryBarriers[i].dstStageMask)) {
4230             cmd_buffer->state.queries.buffer_write_bits |=
4231                ANV_QUERY_COMPUTE_WRITES_PENDING_BITS;
4232          }
4233 
4234          if (stage_is_transfer(dep_info->pMemoryBarriers[i].srcStageMask) &&
4235              mask_is_transfer_write(dep_info->pMemoryBarriers[i].srcAccessMask) &&
4236              cmd_buffer_has_pending_copy_query(cmd_buffer))
4237             flush_query_copies = true;
4238 
4239 #if GFX_VER < 20
4240          /* There's no way of knowing if this memory barrier is related to
4241           * sparse buffers! This is pretty horrible.
4242           */
4243          if (mask_is_write(src_flags) &&
4244              p_atomic_read(&device->num_sparse_resources) > 0)
4245             apply_sparse_flushes = true;
4246 #endif
4247       }
4248 
4249       for (uint32_t i = 0; i < dep_info->bufferMemoryBarrierCount; i++) {
4250          const VkBufferMemoryBarrier2 *buf_barrier =
4251             &dep_info->pBufferMemoryBarriers[i];
4252 
4253          src_flags |= buf_barrier->srcAccessMask;
4254          dst_flags |= buf_barrier->dstAccessMask;
4255 
4256          src_stages |= buf_barrier->srcStageMask;
4257          dst_stages |= buf_barrier->dstStageMask;
4258 
4259          /* Shader writes to buffers that could then be written by a transfer
4260           * command (including queries).
4261           */
4262          if (stage_is_shader(buf_barrier->srcStageMask) &&
4263              mask_is_shader_write(buf_barrier->srcAccessMask) &&
4264              stage_is_transfer(buf_barrier->dstStageMask)) {
4265             cmd_buffer->state.queries.buffer_write_bits |=
4266                ANV_QUERY_COMPUTE_WRITES_PENDING_BITS;
4267          }
4268 
4269          if (stage_is_transfer(buf_barrier->srcStageMask) &&
4270              mask_is_transfer_write(buf_barrier->srcAccessMask) &&
4271              cmd_buffer_has_pending_copy_query(cmd_buffer))
4272             flush_query_copies = true;
4273 
4274 #if GFX_VER < 20
4275          ANV_FROM_HANDLE(anv_buffer, buffer, buf_barrier->buffer);
4276 
4277          if (anv_buffer_is_sparse(buffer) && mask_is_write(src_flags))
4278             apply_sparse_flushes = true;
4279 #endif
4280       }
4281 
4282       for (uint32_t i = 0; i < dep_info->imageMemoryBarrierCount; i++) {
4283          const VkImageMemoryBarrier2 *img_barrier =
4284             &dep_info->pImageMemoryBarriers[i];
4285 
4286          src_flags |= img_barrier->srcAccessMask;
4287          dst_flags |= img_barrier->dstAccessMask;
4288 
4289          src_stages |= img_barrier->srcStageMask;
4290          dst_stages |= img_barrier->dstStageMask;
4291 
4292          ANV_FROM_HANDLE(anv_image, image, img_barrier->image);
4293          const VkImageSubresourceRange *range = &img_barrier->subresourceRange;
4294 
4295          uint32_t base_layer, layer_count;
4296          if (image->vk.image_type == VK_IMAGE_TYPE_3D) {
4297             base_layer = 0;
4298             layer_count = u_minify(image->vk.extent.depth, range->baseMipLevel);
4299          } else {
4300             base_layer = range->baseArrayLayer;
4301             layer_count = vk_image_subresource_layer_count(&image->vk, range);
4302          }
4303          const uint32_t level_count =
4304             vk_image_subresource_level_count(&image->vk, range);
4305 
4306          VkImageLayout old_layout = img_barrier->oldLayout;
4307          VkImageLayout new_layout = img_barrier->newLayout;
4308 
4309          /* If we're inside a render pass, the runtime might have converted
4310           * some layouts from GENERAL to FEEDBACK_LOOP. Check if that's the
4311           * case and reconvert back to the original layout so that application
4312           * barriers within renderpass are operating with consistent layouts.
4313           */
4314          if (!cmd_buffer->vk.runtime_rp_barrier &&
4315              cmd_buffer->vk.render_pass != NULL &&
4316              old_layout == VK_IMAGE_LAYOUT_ATTACHMENT_FEEDBACK_LOOP_OPTIMAL_EXT) {
4317             /* These asserts are here to recognize the changes made by the
4318              * runtime. If they fail, we need to investigate what is going
4319              * on.
4320              */
4321             assert(anv_cmd_graphics_state_has_image_as_attachment(&cmd_buffer->state.gfx,
4322                                                                   image));
4323             VkImageLayout subpass_att_layout, subpass_stencil_att_layout;
4324 
4325             vk_command_buffer_get_attachment_layout(
4326                &cmd_buffer->vk, &image->vk,
4327                &subpass_att_layout, &subpass_stencil_att_layout);
4328 
4329             old_layout = subpass_att_layout;
4330             new_layout = subpass_att_layout;
4331          }
4332 
4333          if (range->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
4334             transition_depth_buffer(cmd_buffer, image,
4335                                     range->baseMipLevel, level_count,
4336                                     base_layer, layer_count,
4337                                     old_layout, new_layout,
4338                                     false /* will_full_fast_clear */);
4339          }
4340 
4341          if (range->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) {
4342             transition_stencil_buffer(cmd_buffer, image,
4343                                       range->baseMipLevel, level_count,
4344                                       base_layer, layer_count,
4345                                       old_layout, new_layout,
4346                                       false /* will_full_fast_clear */);
4347          }
4348 
4349          if (range->aspectMask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
4350             VkImageAspectFlags color_aspects =
4351                vk_image_expand_aspect_mask(&image->vk, range->aspectMask);
4352             anv_foreach_image_aspect_bit(aspect_bit, image, color_aspects) {
4353                transition_color_buffer(cmd_buffer, image, 1UL << aspect_bit,
4354                                        range->baseMipLevel, level_count,
4355                                        base_layer, layer_count,
4356                                        old_layout, new_layout,
4357                                        img_barrier->srcQueueFamilyIndex,
4358                                        img_barrier->dstQueueFamilyIndex,
4359                                        false /* will_full_fast_clear */);
4360             }
4361          }
4362 #if GFX_VER < 20
4363          /* Mark image as compressed if the destination layout has untracked
4364           * writes to the aux surface.
4365           */
4366          VkImageAspectFlags aspects =
4367             vk_image_expand_aspect_mask(&image->vk, range->aspectMask);
4368          anv_foreach_image_aspect_bit(aspect_bit, image, aspects) {
4369             VkImageAspectFlagBits aspect = 1UL << aspect_bit;
4370             if (anv_layout_has_untracked_aux_writes(
4371                    device->info,
4372                    image, aspect,
4373                    img_barrier->newLayout,
4374                    cmd_buffer->queue_family->queueFlags)) {
4375                for (uint32_t l = 0; l < level_count; l++) {
4376                   const uint32_t level = range->baseMipLevel + l;
4377                   const uint32_t aux_layers =
4378                      anv_image_aux_layers(image, aspect, level);
4379 
4380                   if (base_layer >= aux_layers)
4381                      break; /* We will only get fewer layers as level increases */
4382 
4383                   uint32_t level_layer_count =
4384                      MIN2(layer_count, aux_layers - base_layer);
4385 
4386                   set_image_compressed_bit(cmd_buffer, image, aspect,
4387                                            level,
4388                                            base_layer, level_layer_count,
4389                                            true);
4390                }
4391             }
4392          }
4393 
4394          if (anv_image_is_sparse(image) && mask_is_write(src_flags))
4395             apply_sparse_flushes = true;
4396 #endif
4397       }
4398    }
4399 
4400    enum anv_pipe_bits bits =
4401       anv_pipe_flush_bits_for_access_flags(cmd_buffer, src_flags) |
4402       anv_pipe_invalidate_bits_for_access_flags(cmd_buffer, dst_flags);
4403 
4404    /* What stages require a stall at the pixel scoreboard */
4405    VkPipelineStageFlags2 pb_stall_stages =
4406       VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT |
4407       VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT |
4408       VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT |
4409       VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT |
4410       VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT;
4411    if (anv_cmd_buffer_is_render_queue(cmd_buffer)) {
4412       /* On a render queue, the following stages can also use a pixel shader.
4413        */
4414       pb_stall_stages |=
4415          VK_PIPELINE_STAGE_2_TRANSFER_BIT |
4416          VK_PIPELINE_STAGE_2_RESOLVE_BIT |
4417          VK_PIPELINE_STAGE_2_BLIT_BIT |
4418          VK_PIPELINE_STAGE_2_CLEAR_BIT;
4419    }
4420    VkPipelineStageFlags2 cs_stall_stages =
4421       VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT |
4422       VK_PIPELINE_STAGE_2_COMPUTE_SHADER_BIT |
4423       VK_PIPELINE_STAGE_2_ACCELERATION_STRUCTURE_BUILD_BIT_KHR |
4424       VK_PIPELINE_STAGE_2_RAY_TRACING_SHADER_BIT_KHR |
4425       VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT;
4426    if (anv_cmd_buffer_is_compute_queue(cmd_buffer)) {
4427       /* On a compute queue, the following stages can also use a compute
4428        * shader.
4429        */
4430       cs_stall_stages |=
4431          VK_PIPELINE_STAGE_2_TRANSFER_BIT |
4432          VK_PIPELINE_STAGE_2_RESOLVE_BIT |
4433          VK_PIPELINE_STAGE_2_BLIT_BIT |
4434          VK_PIPELINE_STAGE_2_CLEAR_BIT;
4435    } else if (anv_cmd_buffer_is_render_queue(cmd_buffer) &&
4436               cmd_buffer->state.current_pipeline == GPGPU) {
4437       /* In GPGPU mode, the render queue can also use a compute shader for
4438        * transfer operations.
4439        */
4440       cs_stall_stages |= VK_PIPELINE_STAGE_2_TRANSFER_BIT;
4441    }
4442 
4443    /* Prior to Gfx20, we can restrict pb-stall/cs-stall to some pipeline
4444     * modes. Gfx20 doesn't do pipeline switches so we have to assume the
4445     * worst case.
4446     */
4447    const bool needs_pb_stall =
4448       anv_cmd_buffer_is_render_queue(cmd_buffer) &&
4449 #if GFX_VER < 20
4450       cmd_buffer->state.current_pipeline == _3D &&
4451 #endif
4452       (src_stages & pb_stall_stages);
4453    if (needs_pb_stall) {
4454       bits |= GFX_VERx10 >= 125 ?
4455               ANV_PIPE_PSS_STALL_SYNC_BIT :
4456               ANV_PIPE_STALL_AT_SCOREBOARD_BIT;
4457    }
4458    const bool needs_cs_stall =
4459       anv_cmd_buffer_is_render_or_compute_queue(cmd_buffer) &&
4460 #if GFX_VER < 20
4461       cmd_buffer->state.current_pipeline == GPGPU &&
4462 #endif
4463       (src_stages & cs_stall_stages);
4464    if (needs_cs_stall)
4465       bits |= ANV_PIPE_CS_STALL_BIT;
4466 
4467 #if GFX_VER < 20
4468    /* Our HW implementation of the sparse feature prior to Xe2 lives in the
4469     * GAM unit (interface between all the GPU caches and external memory).
4470     * As a result, writes to NULL-bound images & buffers that should be
4471     * ignored are actually still visible in the caches. The only way for us
4472     * to get NULL-bound regions to correctly return 0s is to evict the
4473     * caches so that they are repopulated with 0s.
4474     *
4475     * Our understanding is that Xe2 started tagging the L3 cache with some
4476     * kind of physical address information instead. It is therefore able to
4477     * detect that a cache line maps to a null tile, so the L3 cache also
4478     * behaves correctly for sparse resources and we don't need to flush
4479     * anymore.
4480     */
4481    if (apply_sparse_flushes)
4482       bits |= ANV_PIPE_BARRIER_FLUSH_BITS;
4483 #endif
4484 
4485    /* Copies from query pools are executed with a shader writing through the
4486     * dataport.
4487     */
4488    if (flush_query_copies) {
4489       bits |= (GFX_VER >= 12 ?
4490                ANV_PIPE_HDC_PIPELINE_FLUSH_BIT : ANV_PIPE_DATA_CACHE_FLUSH_BIT);
4491    }
4492 
4493    if (dst_flags & VK_ACCESS_INDIRECT_COMMAND_READ_BIT)
4494       genX(cmd_buffer_flush_generated_draws)(cmd_buffer);
4495 
4496    *out_src_stages = src_stages;
4497    *out_dst_stages = dst_stages;
4498    *out_bits = bits;
4499 }
4500 
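/* Dispatch a barrier to the engine-specific handling: MI_FLUSH_DW based
 * paths for the video and copy engines, accumulated pipe bits (applied
 * lazily at the next flush point) for the render and compute engines.
 */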
4501 static void
4502 cmd_buffer_barrier(struct anv_cmd_buffer *cmd_buffer,
4503                    uint32_t n_dep_infos,
4504                    const VkDependencyInfo *dep_infos,
4505                    const char *reason)
4506 {
4507    switch (cmd_buffer->batch.engine_class) {
4508    case INTEL_ENGINE_CLASS_VIDEO:
4509       cmd_buffer_barrier_video(cmd_buffer, n_dep_infos, dep_infos);
4510       break;
4511 
4512    case INTEL_ENGINE_CLASS_COPY:
4513       cmd_buffer_barrier_blitter(cmd_buffer, n_dep_infos, dep_infos);
4514       break;
4515 
4516    case INTEL_ENGINE_CLASS_RENDER:
4517    case INTEL_ENGINE_CLASS_COMPUTE: {
4518       VkPipelineStageFlags2 src_stages, dst_stages;
4519       enum anv_pipe_bits bits;
4520       cmd_buffer_accumulate_barrier_bits(cmd_buffer, n_dep_infos, dep_infos,
4521                                          &src_stages, &dst_stages, &bits);
4522 
4523       anv_add_pending_pipe_bits(cmd_buffer, bits, reason);
4524       break;
4525    }
4526 
4527    default:
4528       unreachable("Invalid engine class");
4529    }
4530 }
4531 
4532 void genX(CmdPipelineBarrier2)(
4533     VkCommandBuffer                             commandBuffer,
4534     const VkDependencyInfo*                     pDependencyInfo)
4535 {
4536    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
4537 
4538    cmd_buffer_barrier(cmd_buffer, 1, pDependencyInfo, "pipe barrier");
4539 }
4540 
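/* Draw-call breakpoint support: bump/read the device draw-call counter
 * and, when it matches intel_debug_bkp_before_draw_count (before a draw)
 * or intel_debug_bkp_after_draw_count (after a draw), emit an
 * MI_SEMAPHORE_WAIT polling the device breakpoint dword until it reads 1.
 */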
4541 void
4542 genX(batch_emit_breakpoint)(struct anv_batch *batch,
4543                             struct anv_device *device,
4544                             bool emit_before_draw)
4545 {
4546    /* Update draw call count once */
4547    uint32_t draw_count = emit_before_draw ?
4548                          p_atomic_inc_return(&device->draw_call_count) :
4549                          p_atomic_read(&device->draw_call_count);
4550 
4551    if (((draw_count == intel_debug_bkp_before_draw_count &&
4552         emit_before_draw) ||
4553        (draw_count == intel_debug_bkp_after_draw_count &&
4554         !emit_before_draw))) {
4555       struct anv_address wait_addr =
4556          anv_state_pool_state_address(&device->dynamic_state_pool,
4557                                       device->breakpoint);
4558 
4559       anv_batch_emit(batch, GENX(MI_SEMAPHORE_WAIT), sem) {
4560          sem.WaitMode            = PollingMode;
4561          sem.CompareOperation    = COMPARE_SAD_EQUAL_SDD;
4562          sem.SemaphoreDataDword  = 0x1;
4563          sem.SemaphoreAddress    = wait_addr;
4564       };
4565    }
4566 }
4567 
4568 /* Only emit PIPELINE_SELECT; for the whole mode switch and flushing, use
4569  * flush_pipeline_select()
4570  */
4571 void
4572 genX(emit_pipeline_select)(struct anv_batch *batch, uint32_t pipeline,
4573                            const struct anv_device *device)
4574 {
4575    /* Bspec 55860: Xe2+ no longer requires PIPELINE_SELECT */
4576 #if GFX_VER < 20
4577    anv_batch_emit(batch, GENX(PIPELINE_SELECT), ps) {
4578       ps.MaskBits = GFX_VERx10 >= 125 ? 0x93 : GFX_VER >= 12 ? 0x13 : 0x3;
4579 #if GFX_VER == 12
4580       ps.MediaSamplerDOPClockGateEnable = true;
4581 #endif
4582       ps.PipelineSelection = pipeline;
4583 #if GFX_VERx10 == 125
4584       /* It might still be better to only enable this when the compute
4585        * pipeline will have DPAS instructions.
4586        */
4587       ps.SystolicModeEnable = pipeline == GPGPU &&
4588          device->vk.enabled_extensions.KHR_cooperative_matrix &&
4589          device->vk.enabled_features.cooperativeMatrix;
4590 #endif
4591    }
4592 #endif /* if GFX_VER < 20 */
4593 }
4594 
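/* Switch the command buffer between the 3D and GPGPU pipelines: flush
 * and invalidate the required caches before PIPELINE_SELECT and apply
 * the various per-generation workarounds around the switch.
 */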
4595 static void
4596 genX(flush_pipeline_select)(struct anv_cmd_buffer *cmd_buffer,
4597                             uint32_t pipeline)
4598 {
4599    UNUSED const struct intel_device_info *devinfo = cmd_buffer->device->info;
4600 
4601    if (cmd_buffer->state.current_pipeline == pipeline)
4602       return;
4603 
4604 #if GFX_VER == 9
4605    /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
4606     *
4607     *   Software must clear the COLOR_CALC_STATE Valid field in
4608     *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
4609     *   with Pipeline Select set to GPGPU.
4610     *
4611     * The internal hardware docs recommend the same workaround for Gfx9
4612     * hardware too.
4613     */
4614    if (pipeline == GPGPU)
4615       anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
4616 #endif
4617 
4618 #if GFX_VERx10 == 120
4619    /* Undocumented workaround to force the re-emission of
4620     * MEDIA_INTERFACE_DESCRIPTOR_LOAD when switching from 3D to Compute
4621     * pipeline without rebinding a pipeline :
4622     *    vkCmdBindPipeline(COMPUTE, cs_pipeline);
4623     *    vkCmdDispatch(...);
4624     *    vkCmdBindPipeline(GRAPHICS, gfx_pipeline);
4625     *    vkCmdDraw(...);
4626     *    vkCmdDispatch(...);
4627     */
4628    if (pipeline == _3D)
4629       cmd_buffer->state.compute.pipeline_dirty = true;
4630 #endif
4631 
4632    /* We apparently cannot flush the tile cache (color/depth) from the GPGPU
4633     * pipeline. That means query clears will not be visible to query
4634     * copy/write. So we need to flush it before going to GPGPU mode.
4635     */
4636    if (cmd_buffer->state.current_pipeline == _3D &&
4637        cmd_buffer->state.queries.clear_bits) {
4638       anv_add_pending_pipe_bits(cmd_buffer,
4639                                 ANV_PIPE_QUERY_BITS(cmd_buffer->state.queries.clear_bits),
4640                                 "query clear flush prior to GPGPU");
4641    }
4642 
4643    /* Flush and invalidate bits needed prior to PIPELINE_SELECT. */
4644    enum anv_pipe_bits bits = 0;
4645 
4646 #if GFX_VER >= 12
4647    /* From Tigerlake PRM, Volume 2a, PIPELINE_SELECT:
4648     *
4649     *   "Software must ensure Render Cache, Depth Cache and HDC Pipeline flush
4650     *   are flushed through a stalling PIPE_CONTROL command prior to
4651     *   programming of PIPELINE_SELECT command transitioning Pipeline Select
4652     *   from 3D to GPGPU/Media.
4653     *   Software must ensure HDC Pipeline flush and Generic Media State Clear
4654     *   is issued through a stalling PIPE_CONTROL command prior to programming
4655     *   of PIPELINE_SELECT command transitioning Pipeline Select from
4656     *   GPGPU/Media to 3D."
4657     *
4658     * Note: Issuing PIPE_CONTROL_MEDIA_STATE_CLEAR causes GPU hangs, probably
4659     * because PIPE was not in MEDIA mode?!
4660     */
4661    bits |= ANV_PIPE_CS_STALL_BIT | ANV_PIPE_HDC_PIPELINE_FLUSH_BIT;
4662 
4663    if (cmd_buffer->state.current_pipeline == _3D) {
4664       bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
4665               ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
4666    } else {
4667       bits |= ANV_PIPE_UNTYPED_DATAPORT_CACHE_FLUSH_BIT;
4668    }
4669 #else
4670    /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
4671     * PIPELINE_SELECT [DevBWR+]":
4672     *
4673     *   Project: DEVSNB+
4674     *
4675     *   Software must ensure all the write caches are flushed through a
4676     *   stalling PIPE_CONTROL command followed by another PIPE_CONTROL
4677     *   command to invalidate read only caches prior to programming
4678     *   MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
4679     *
4680     * Note the cmd_buffer_apply_pipe_flushes will split this into two
4681     * PIPE_CONTROLs.
4682     */
4683    bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
4684            ANV_PIPE_DEPTH_CACHE_FLUSH_BIT |
4685            ANV_PIPE_HDC_PIPELINE_FLUSH_BIT |
4686            ANV_PIPE_CS_STALL_BIT |
4687            ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
4688            ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT |
4689            ANV_PIPE_STATE_CACHE_INVALIDATE_BIT |
4690            ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT |
4691            ANV_PIPE_UNTYPED_DATAPORT_CACHE_FLUSH_BIT;
4692 #endif
4693 
4694    /* Wa_16013063087 -  State Cache Invalidate must be issued prior to
4695     * PIPELINE_SELECT when switching from 3D to Compute.
4696     *
4697     * SW must do this by programming of PIPECONTROL with “CS Stall” followed by
4698     * a PIPECONTROL with State Cache Invalidate bit set.
4699     *
4700     */
4701    if (cmd_buffer->state.current_pipeline == _3D && pipeline == GPGPU &&
4702        intel_needs_workaround(cmd_buffer->device->info, 16013063087))
4703       bits |= ANV_PIPE_STATE_CACHE_INVALIDATE_BIT;
4704 
4705    anv_add_pending_pipe_bits(cmd_buffer, bits, "flush/invalidate PIPELINE_SELECT");
4706    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
4707 
4708 #if GFX_VER == 9
4709    if (pipeline == _3D) {
4710       /* There is a mid-object preemption workaround which requires you to
4711        * re-emit MEDIA_VFE_STATE after switching from GPGPU to 3D.  However,
4712        * even without preemption, we have issues with geometry flickering when
4713        * GPGPU and 3D are back-to-back and this seems to fix it.  We don't
4714        * really know why.
4715        *
4716        * Also, from the Sky Lake PRM Vol 2a, MEDIA_VFE_STATE:
4717        *
4718        *    "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
4719        *    the only bits that are changed are scoreboard related ..."
4720        *
4721        * This is satisfied by applying pre-PIPELINE_SELECT pipe flushes above.
4722        */
4723       anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_VFE_STATE), vfe) {
4724          vfe.MaximumNumberofThreads =
4725             devinfo->max_cs_threads * devinfo->subslice_total - 1;
4726          vfe.NumberofURBEntries     = 2;
4727          vfe.URBEntryAllocationSize = 2;
4728       }
4729 
4730       /* We just emitted a dummy MEDIA_VFE_STATE so now that packet is
4731        * invalid. Set the compute pipeline to dirty to force a re-emit of the
4732        * pipeline in case we get back-to-back dispatch calls with the same
4733        * pipeline and a PIPELINE_SELECT in between.
4734        */
4735       cmd_buffer->state.compute.pipeline_dirty = true;
4736    }
4737 #endif
4738 
4739    genX(emit_pipeline_select)(&cmd_buffer->batch, pipeline, cmd_buffer->device);
4740 
4741 #if GFX_VER == 9
4742    if (devinfo->platform == INTEL_PLATFORM_GLK) {
4743       /* Project: DevGLK
4744        *
4745        * "This chicken bit works around a hardware issue with barrier logic
4746        *  encountered when switching between GPGPU and 3D pipelines.  To
4747        *  workaround the issue, this mode bit should be set after a pipeline
4748        *  is selected."
4749        */
4750       anv_batch_write_reg(&cmd_buffer->batch, GENX(SLICE_COMMON_ECO_CHICKEN1), scec1) {
4751          scec1.GLKBarrierMode = pipeline == GPGPU ? GLK_BARRIER_MODE_GPGPU
4752                                                   : GLK_BARRIER_MODE_3D_HULL;
4753          scec1.GLKBarrierModeMask = 1;
4754       }
4755    }
4756 #endif
4757 
4758 #if GFX_VER == 9
4759    /* Undocumented workaround, we need to reemit MEDIA_CURBE_LOAD on Gfx9 when
4760     * switching from 3D->GPGPU, otherwise the shader gets corrupted push
4761     * constants. Note that this doesn't trigger a push constant reallocation,
4762     * we just reprogram the same pointer.
4763     *
4764     * The issue reproduces pretty much 100% on
4765     * dEQP-VK.memory_model.transitive.* tests. Reducing the number of
4766     * iteration in the test from 50 to < 10 makes the tests flaky.
4767     */
4768    if (pipeline == GPGPU)
4769       cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
4770 #endif
4771    cmd_buffer->state.current_pipeline = pipeline;
4772 }
4773 
4774 void
4775 genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer)
4776 {
4777    genX(flush_pipeline_select)(cmd_buffer, _3D);
4778 }
4779 
4780 void
4781 genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer)
4782 {
4783    genX(flush_pipeline_select)(cmd_buffer, GPGPU);
4784 }
4785 
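/* Wa_1808121037: toggle the HiZ plane optimization chicken bit whenever
 * the bound depth surface switches to or from D16_UNORM 1x MSAA, with a
 * depth flush + stall around the register write.
 */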
4786 void
4787 genX(cmd_buffer_emit_gfx12_depth_wa)(struct anv_cmd_buffer *cmd_buffer,
4788                                      const struct isl_surf *surf)
4789 {
4790 #if INTEL_NEEDS_WA_1808121037
4791    const bool is_d16_1x_msaa = surf->format == ISL_FORMAT_R16_UNORM &&
4792                                surf->samples == 1;
4793 
4794    switch (cmd_buffer->state.gfx.depth_reg_mode) {
4795    case ANV_DEPTH_REG_MODE_HW_DEFAULT:
4796       if (!is_d16_1x_msaa)
4797          return;
4798       break;
4799    case ANV_DEPTH_REG_MODE_D16_1X_MSAA:
4800       if (is_d16_1x_msaa)
4801          return;
4802       break;
4803    case ANV_DEPTH_REG_MODE_UNKNOWN:
4804       break;
4805    }
4806 
4807    /* We'll change some CHICKEN registers depending on the depth surface
4808     * format. Do a depth flush and stall so the pipeline is not using these
4809     * settings while we change the registers.
4810     */
4811    anv_add_pending_pipe_bits(cmd_buffer,
4812                              ANV_PIPE_DEPTH_CACHE_FLUSH_BIT |
4813                              ANV_PIPE_DEPTH_STALL_BIT |
4814                              ANV_PIPE_END_OF_PIPE_SYNC_BIT,
4815                              "Workaround: Stop pipeline for 1808121037");
4816    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
4817 
4818    /* Wa_1808121037
4819     *
4820     * To avoid sporadic corruptions “Set 0x7010[9] when Depth Buffer
4821     * Surface Format is D16_UNORM , surface type is not NULL & 1X_MSAA”.
4822     */
4823    anv_batch_write_reg(&cmd_buffer->batch, GENX(COMMON_SLICE_CHICKEN1), reg) {
4824       reg.HIZPlaneOptimizationdisablebit = is_d16_1x_msaa;
4825       reg.HIZPlaneOptimizationdisablebitMask = true;
4826    }
4827 
4828    cmd_buffer->state.gfx.depth_reg_mode =
4829       is_d16_1x_msaa ? ANV_DEPTH_REG_MODE_D16_1X_MSAA :
4830                        ANV_DEPTH_REG_MODE_HW_DEFAULT;
4831 #endif
4832 }
4833 
4834 #if GFX_VER == 9
4835 /* From the Skylake PRM, 3DSTATE_VERTEX_BUFFERS:
4836  *
4837  *    "The VF cache needs to be invalidated before binding and then using
4838  *    Vertex Buffers that overlap with any previously bound Vertex Buffer
4839  *    (at a 64B granularity) since the last invalidation.  A VF cache
4840  *    invalidate is performed by setting the "VF Cache Invalidation Enable"
4841  *    bit in PIPE_CONTROL."
4842  *
4843  * This is implemented by carefully tracking all vertex and index buffer
4844  * bindings and flushing if the cache ever ends up with a range in the cache
4845  * that would exceed 4 GiB.  This is implemented in three parts:
4846  *
4847  *    1. genX(cmd_buffer_set_binding_for_gfx8_vb_flush)() which must be called
4848  *       every time a 3DSTATE_VERTEX_BUFFER packet is emitted and informs the
4849  *       tracking code of the new binding.  If this new binding would cause
4850  *       the cache to have a too-large range on the next draw call, a pipeline
4851  *       stall and VF cache invalidate are added to pending_pipeline_bits.
4852  *
4853  *    2. genX(cmd_buffer_apply_pipe_flushes)() resets the cache tracking to
4854  *       empty whenever we emit a VF invalidate.
4855  *
4856  *    3. genX(cmd_buffer_update_dirty_vbs_for_gfx8_vb_flush)() must be called
4857  *       after every 3DPRIMITIVE and copies the bound range into the dirty
4858  *       range for each used buffer.  This has to be a separate step because
4859  *       we don't always re-bind all buffers and so 1. can't know which
4860  *       buffers are actually bound.
4861  */
4862 void
4863 genX(cmd_buffer_set_binding_for_gfx8_vb_flush)(struct anv_cmd_buffer *cmd_buffer,
4864                                                int vb_index,
4865                                                struct anv_address vb_address,
4866                                                uint32_t vb_size)
4867 {
4868    if (GFX_VER > 9)
4869       return;
4870 
4871    struct anv_vb_cache_range *bound, *dirty;
4872    if (vb_index == -1) {
4873       bound = &cmd_buffer->state.gfx.ib_bound_range;
4874       dirty = &cmd_buffer->state.gfx.ib_dirty_range;
4875    } else {
4876       assert(vb_index >= 0);
4877       assert(vb_index < ARRAY_SIZE(cmd_buffer->state.gfx.vb_bound_ranges));
4878       assert(vb_index < ARRAY_SIZE(cmd_buffer->state.gfx.vb_dirty_ranges));
4879       bound = &cmd_buffer->state.gfx.vb_bound_ranges[vb_index];
4880       dirty = &cmd_buffer->state.gfx.vb_dirty_ranges[vb_index];
4881    }
4882 
4883    if (anv_gfx8_9_vb_cache_range_needs_workaround(bound, dirty,
4884                                                   vb_address,
4885                                                   vb_size)) {
4886       anv_add_pending_pipe_bits(cmd_buffer,
4887                                 ANV_PIPE_CS_STALL_BIT |
4888                                 ANV_PIPE_VF_CACHE_INVALIDATE_BIT,
4889                                 "vb > 32b range");
4890    }
4891 }
4892 
4893 void
4894 genX(cmd_buffer_update_dirty_vbs_for_gfx8_vb_flush)(struct anv_cmd_buffer *cmd_buffer,
4895                                                     uint32_t access_type,
4896                                                     uint64_t vb_used)
4897 {
4898    if (access_type == RANDOM) {
4899       /* We have an index buffer */
4900       struct anv_vb_cache_range *bound = &cmd_buffer->state.gfx.ib_bound_range;
4901       struct anv_vb_cache_range *dirty = &cmd_buffer->state.gfx.ib_dirty_range;
4902 
4903       anv_merge_vb_cache_range(dirty, bound);
4904    }
4905 
4906    uint64_t mask = vb_used;
4907    while (mask) {
4908       int i = u_bit_scan64(&mask);
4909       assert(i >= 0);
4910       assert(i < ARRAY_SIZE(cmd_buffer->state.gfx.vb_bound_ranges));
4911       assert(i < ARRAY_SIZE(cmd_buffer->state.gfx.vb_dirty_ranges));
4912 
4913       struct anv_vb_cache_range *bound, *dirty;
4914       bound = &cmd_buffer->state.gfx.vb_bound_ranges[i];
4915       dirty = &cmd_buffer->state.gfx.vb_dirty_ranges[i];
4916 
4917       anv_merge_vb_cache_range(dirty, bound);
4918    }
4919 }
4920 #endif /* GFX_VER == 9 */
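
/* Illustrative sketch only (not driver code): the tracking described in the
 * comment above ultimately boils down to checking whether the union of the
 * bound range, the dirty range, and the newly bound range still fits inside
 * a 4 GiB window.  The struct and helper below are hypothetical stand-ins
 * for anv_vb_cache_range and anv_gfx8_9_vb_cache_range_needs_workaround();
 * the real helper also deals with details such as empty ranges that this
 * simplified version ignores.  Kept under "#if 0" so it is never built.
 */
#if 0
struct example_vb_range { uint64_t start, end; };  /* [start, end) in bytes */

static bool
example_vb_range_needs_invalidate(const struct example_vb_range *bound,
                                  const struct example_vb_range *dirty,
                                  uint64_t new_start, uint64_t new_size)
{
   /* Everything the VF cache might still hold for this binding slot, plus
    * the range being bound now.
    */
   const uint64_t lo = MIN3(bound->start, dirty->start, new_start);
   const uint64_t hi = MAX3(bound->end, dirty->end, new_start + new_size);

   /* The cache tags only cover 32 bits of address (at 64B granularity), so
    * a span exceeding 4 GiB can alias and requires a stall + VF invalidate.
    */
   return hi - lo > (1ull << 32);
}
#endif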
4921 
4922 /**
4923  * Update the pixel hashing modes that determine the balancing of PS threads
4924  * across subslices and slices.
4925  *
4926  * \param width Width bound of the rendering area (already scaled down if \p
4927  *              scale is greater than 1).
4928  * \param height Height bound of the rendering area (already scaled down if \p
4929  *               scale is greater than 1).
4930  * \param scale The number of framebuffer samples that could potentially be
4931  *              affected by an individual channel of the PS thread.  This is
4932  *              typically one for single-sampled rendering, but for operations
4933  *              like CCS resolves and fast clears a single PS invocation may
4934  *              update a huge number of pixels, in which case a finer
4935  *              balancing is desirable in order to maximally utilize the
4936  *              bandwidth available.  UINT_MAX can be used as shorthand for
4937  *              "finest hashing mode available".
4938  */
4939 void
4940 genX(cmd_buffer_emit_hashing_mode)(struct anv_cmd_buffer *cmd_buffer,
4941                                    unsigned width, unsigned height,
4942                                    unsigned scale)
4943 {
4944 #if GFX_VER == 9
4945    const struct intel_device_info *devinfo = cmd_buffer->device->info;
4946    const unsigned slice_hashing[] = {
4947       /* Because all Gfx9 platforms with more than one slice require
4948        * three-way subslice hashing, a single "normal" 16x16 slice hashing
4949        * block is guaranteed to suffer from substantial imbalance, with one
4950        * subslice receiving twice as much work as the other two in the
4951        * slice.
4952        *
4953        * The performance impact of that would be particularly severe when
4954        * three-way hashing is also in use for slice balancing (which is the
4955        * case for all Gfx9 GT4 platforms), because one of the slices
4956        * receives one every three 16x16 blocks in either direction, which
4957        * is roughly the periodicity of the underlying subslice imbalance
4958        * pattern ("roughly" because in reality the hardware's
4959        * implementation of three-way hashing doesn't do exact modulo 3
4960        * arithmetic, which somewhat decreases the magnitude of this effect
4961        * in practice).  This leads to a systematic subslice imbalance
4962        * within that slice regardless of the size of the primitive.  The
4963        * 32x32 hashing mode guarantees that the subslice imbalance within a
4964        * single slice hashing block is minimal, largely eliminating this
4965        * effect.
4966        */
4967       _32x32,
4968       /* Finest slice hashing mode available. */
4969       NORMAL
4970    };
4971    const unsigned subslice_hashing[] = {
4972       /* 16x16 would provide a slight cache locality benefit especially
4973        * visible in the sampler L1 cache efficiency of low-bandwidth
4974        * non-LLC platforms, but it comes at the cost of greater subslice
4975        * imbalance for primitives of dimensions approximately intermediate
4976        * between 16x4 and 16x16.
4977        */
4978       _16x4,
4979       /* Finest subslice hashing mode available. */
4980       _8x4
4981    };
4982    /* Dimensions of the smallest hashing block of a given hashing mode.  If
4983     * the rendering area is smaller than this there can't possibly be any
4984     * benefit from switching to this mode, so we optimize out the
4985     * transition.
4986     */
4987    const unsigned min_size[][2] = {
4988          { 16, 4 },
4989          { 8, 4 }
4990    };
4991    const unsigned idx = scale > 1;
4992 
4993    if (cmd_buffer->state.current_hash_scale != scale &&
4994        (width > min_size[idx][0] || height > min_size[idx][1])) {
4995       anv_add_pending_pipe_bits(cmd_buffer,
4996                                 ANV_PIPE_CS_STALL_BIT |
4997                                 ANV_PIPE_STALL_AT_SCOREBOARD_BIT,
4998                                 "change pixel hash mode");
4999       genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
5000 
5001       anv_batch_write_reg(&cmd_buffer->batch, GENX(GT_MODE), gt) {
5002          gt.SliceHashing = (devinfo->num_slices > 1 ? slice_hashing[idx] : 0);
5003          gt.SliceHashingMask = (devinfo->num_slices > 1 ? -1 : 0);
5004          gt.SubsliceHashing = subslice_hashing[idx];
5005          gt.SubsliceHashingMask = -1;
5006       }
5007 
5008       cmd_buffer->state.current_hash_scale = scale;
5009    }
5010 #endif
5011 }
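
/* Hypothetical call sites for the helper above, kept under "#if 0" as an
 * illustration only: ordinary rendering passes scale == 1, while a large
 * fast-clear/resolve style operation requests the finest hashing mode with
 * UINT_MAX.
 */
#if 0
static void
example_emit_hashing_mode_usage(struct anv_cmd_buffer *cmd_buffer,
                                const VkRect2D *render_area)
{
   /* Regular single-sampled draw: one sample per PS channel. */
   genX(cmd_buffer_emit_hashing_mode)(cmd_buffer,
                                      render_area->extent.width,
                                      render_area->extent.height, 1);

   /* A CCS resolve or fast clear where one PS invocation touches many
    * pixels: ask for the finest hashing mode available.  (Per the contract
    * documented above, a real caller would pass dimensions already scaled
    * down; they are shown unscaled here for brevity.)
    */
   genX(cmd_buffer_emit_hashing_mode)(cmd_buffer,
                                      render_area->extent.width,
                                      render_area->extent.height, UINT_MAX);
}
#endif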
5012 
5013 static void
5014 cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
5015 {
5016    struct anv_device *device = cmd_buffer->device;
5017    struct anv_cmd_graphics_state *gfx = &cmd_buffer->state.gfx;
5018 
5019    uint32_t *dw = anv_batch_emit_dwords(&cmd_buffer->batch,
5020                                         device->isl_dev.ds.size / 4);
5021    if (dw == NULL)
5022       return;
5023 
5024    struct isl_view isl_view = {};
5025    struct isl_depth_stencil_hiz_emit_info info = {
5026       .view = &isl_view,
5027       .mocs = anv_mocs(device, NULL, ISL_SURF_USAGE_DEPTH_BIT),
5028    };
5029 
5030    if (gfx->depth_att.iview != NULL) {
5031       isl_view = gfx->depth_att.iview->planes[0].isl;
5032    } else if (gfx->stencil_att.iview != NULL) {
5033       isl_view = gfx->stencil_att.iview->planes[0].isl;
5034    }
5035 
5036    if (gfx->view_mask) {
5037       assert(isl_view.array_len == 0 ||
5038              isl_view.array_len >= util_last_bit(gfx->view_mask));
5039       isl_view.array_len = util_last_bit(gfx->view_mask);
5040    } else {
5041       assert(isl_view.array_len == 0 ||
5042              isl_view.array_len >= util_last_bit(gfx->layer_count));
5043       isl_view.array_len = gfx->layer_count;
5044    }
5045 
5046    if (gfx->depth_att.iview != NULL) {
5047       const struct anv_image_view *iview = gfx->depth_att.iview;
5048       const struct anv_image *image = iview->image;
5049 
5050       const uint32_t depth_plane =
5051          anv_image_aspect_to_plane(image, VK_IMAGE_ASPECT_DEPTH_BIT);
5052       const struct anv_surface *depth_surface =
5053          &image->planes[depth_plane].primary_surface;
5054       const struct anv_address depth_address =
5055          anv_image_address(image, &depth_surface->memory_range);
5056 
5057       anv_reloc_list_add_bo(cmd_buffer->batch.relocs, depth_address.bo);
5058 
5059       info.depth_surf = &depth_surface->isl;
5060       info.depth_address = anv_address_physical(depth_address);
5061       info.mocs =
5062          anv_mocs(device, depth_address.bo, ISL_SURF_USAGE_DEPTH_BIT);
5063 
5064       info.hiz_usage = gfx->depth_att.aux_usage;
5065       if (info.hiz_usage != ISL_AUX_USAGE_NONE) {
5066          assert(isl_aux_usage_has_hiz(info.hiz_usage));
5067 
5068          const struct anv_surface *hiz_surface =
5069             &image->planes[depth_plane].aux_surface;
5070          const struct anv_address hiz_address =
5071             anv_image_address(image, &hiz_surface->memory_range);
5072 
5073          anv_reloc_list_add_bo(cmd_buffer->batch.relocs, hiz_address.bo);
5074 
5075          info.hiz_surf = &hiz_surface->isl;
5076          info.hiz_address = anv_address_physical(hiz_address);
5077 
5078          info.depth_clear_value = anv_image_hiz_clear_value(image).f32[0];
5079       }
5080    }
5081 
5082    if (gfx->stencil_att.iview != NULL) {
5083       const struct anv_image_view *iview = gfx->stencil_att.iview;
5084       const struct anv_image *image = iview->image;
5085 
5086       const uint32_t stencil_plane =
5087          anv_image_aspect_to_plane(image, VK_IMAGE_ASPECT_STENCIL_BIT);
5088       const struct anv_surface *stencil_surface =
5089          &image->planes[stencil_plane].primary_surface;
5090       const struct anv_address stencil_address =
5091          anv_image_address(image, &stencil_surface->memory_range);
5092 
5093       anv_reloc_list_add_bo(cmd_buffer->batch.relocs, stencil_address.bo);
5094 
5095       info.stencil_surf = &stencil_surface->isl;
5096 
5097       info.stencil_aux_usage = image->planes[stencil_plane].aux_usage;
5098       info.stencil_address = anv_address_physical(stencil_address);
5099       info.mocs =
5100          anv_mocs(device, stencil_address.bo, ISL_SURF_USAGE_STENCIL_BIT);
5101    }
5102 
5103    isl_emit_depth_stencil_hiz_s(&device->isl_dev, dw, &info);
5104 
5105    if (intel_needs_workaround(cmd_buffer->device->info, 1408224581) ||
5106        intel_needs_workaround(cmd_buffer->device->info, 14014097488) ||
5107        intel_needs_workaround(cmd_buffer->device->info, 14016712196)) {
5108       /* Wa_1408224581
5109        *
5110        * Workaround: Gfx12LP Astep only An additional pipe control with
5111        * post-sync = store dword operation would be required.( w/a is to have
5112        * an additional pipe control after the stencil state whenever the
5113        * surface state bits of this state is changing).
5114        *
5115        * This also seems sufficient to handle Wa_14014097488 and
5116        * Wa_14016712196.
5117        */
5118       genx_batch_emit_pipe_control_write(&cmd_buffer->batch, device->info,
5119                                          cmd_buffer->state.current_pipeline,
5120                                          WriteImmediateData,
5121                                          device->workaround_address, 0, 0);
5122    }
5123 
5124    if (info.depth_surf)
5125       genX(cmd_buffer_emit_gfx12_depth_wa)(cmd_buffer, info.depth_surf);
5126 
5127    cmd_buffer->state.gfx.hiz_enabled = isl_aux_usage_has_hiz(info.hiz_usage);
5128 }
5129 
5130 static void
5131 cmd_buffer_emit_cps_control_buffer(struct anv_cmd_buffer *cmd_buffer,
5132                                    const struct anv_image_view *fsr_iview)
5133 {
5134 #if GFX_VERx10 >= 125
5135    struct anv_device *device = cmd_buffer->device;
5136 
5137    if (!device->vk.enabled_extensions.KHR_fragment_shading_rate)
5138       return;
5139 
5140    uint32_t *dw = anv_batch_emit_dwords(&cmd_buffer->batch,
5141                                         device->isl_dev.cpb.size / 4);
5142    if (dw == NULL)
5143       return;
5144 
5145    struct isl_cpb_emit_info info = { };
5146 
5147    if (fsr_iview) {
5148       const struct anv_image_binding *binding = &fsr_iview->image->bindings[0];
5149 
5150       anv_reloc_list_add_bo(cmd_buffer->batch.relocs, binding->address.bo);
5151 
5152       struct anv_address addr =
5153          anv_address_add(binding->address, binding->memory_range.offset);
5154 
5155       info.view = &fsr_iview->planes[0].isl;
5156       info.surf = &fsr_iview->image->planes[0].primary_surface.isl;
5157       info.address = anv_address_physical(addr);
5158       info.mocs =
5159          anv_mocs(device, fsr_iview->image->bindings[0].address.bo,
5160                   ISL_SURF_USAGE_CPB_BIT);
5161    }
5162 
5163    isl_emit_cpb_control_s(&device->isl_dev, dw, &info);
5164 
5165    /* Wa_14016712196:
5166     * Emit dummy pipe control after state that sends implicit depth flush.
5167     */
5168    if (intel_needs_workaround(device->info, 14016712196)) {
5169       genx_batch_emit_pipe_control_write(&cmd_buffer->batch, device->info,
5170                                          cmd_buffer->state.current_pipeline,
5171                                          WriteImmediateData,
5172                                          device->workaround_address, 0, 0);
5173    }
5174 
5175 #endif /* GFX_VERx10 >= 125 */
5176 }
5177 
5178 static VkImageLayout
5179 attachment_initial_layout(const VkRenderingAttachmentInfo *att)
5180 {
5181    const VkRenderingAttachmentInitialLayoutInfoMESA *layout_info =
5182       vk_find_struct_const(att->pNext,
5183                            RENDERING_ATTACHMENT_INITIAL_LAYOUT_INFO_MESA);
5184    if (layout_info != NULL)
5185       return layout_info->initialLayout;
5186 
5187    return att->imageLayout;
5188 }
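
/* Sketch of how a caller could supply a distinct initial layout through the
 * pNext chain consumed by attachment_initial_layout() above.  Illustrative
 * only ("#if 0"); the chosen layouts are arbitrary.
 */
#if 0
static void
example_attachment_initial_layout_usage(VkImageView view)
{
   const VkRenderingAttachmentInitialLayoutInfoMESA initial_layout_info = {
      .sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INITIAL_LAYOUT_INFO_MESA,
      .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
   };
   const VkRenderingAttachmentInfo att = {
      .sType = VK_STRUCTURE_TYPE_RENDERING_ATTACHMENT_INFO,
      .pNext = &initial_layout_info,
      .imageView = view,
      .imageLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
      .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
      .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
   };

   /* attachment_initial_layout(&att) now returns VK_IMAGE_LAYOUT_UNDEFINED
    * instead of att.imageLayout, which triggers a layout transition in
    * CmdBeginRendering below.
    */
   assert(attachment_initial_layout(&att) == VK_IMAGE_LAYOUT_UNDEFINED);
}
#endif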
5189 
5190 void genX(CmdBeginRendering)(
5191     VkCommandBuffer                             commandBuffer,
5192     const VkRenderingInfo*                      pRenderingInfo)
5193 {
5194    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
5195    struct anv_cmd_graphics_state *gfx = &cmd_buffer->state.gfx;
5196    VkResult result;
5197 
5198    if (!anv_cmd_buffer_is_render_queue(cmd_buffer)) {
5199       assert(!"Trying to start a render pass on non-render queue!");
5200       anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_UNKNOWN);
5201       return;
5202    }
5203 
5204    anv_measure_beginrenderpass(cmd_buffer);
5205    trace_intel_begin_render_pass(&cmd_buffer->trace);
5206 
5207    gfx->rendering_flags = pRenderingInfo->flags;
5208    gfx->view_mask = pRenderingInfo->viewMask;
5209    gfx->layer_count = pRenderingInfo->layerCount;
5210    gfx->samples = 0;
5211 
5212    if (gfx->render_area.offset.x != pRenderingInfo->renderArea.offset.x ||
5213        gfx->render_area.offset.y != pRenderingInfo->renderArea.offset.y ||
5214        gfx->render_area.extent.width != pRenderingInfo->renderArea.extent.width ||
5215        gfx->render_area.extent.height != pRenderingInfo->renderArea.extent.height) {
5216       gfx->render_area = pRenderingInfo->renderArea;
5217       gfx->dirty |= ANV_CMD_DIRTY_RENDER_AREA;
5218    }
5219 
5220    const bool is_multiview = gfx->view_mask != 0;
5221    const VkRect2D render_area = gfx->render_area;
5222    const uint32_t layers =
5223       is_multiview ? util_last_bit(gfx->view_mask) : gfx->layer_count;
5224 
5225    /* The framebuffer size is at least large enough to contain the render
5226     * area.  Because a zero renderArea is possible, we MAX with 1.
5227     */
5228    struct isl_extent3d fb_size = {
5229       .w = MAX2(1, render_area.offset.x + render_area.extent.width),
5230       .h = MAX2(1, render_area.offset.y + render_area.extent.height),
5231       .d = layers,
5232    };
5233 
5234    const uint32_t color_att_count = pRenderingInfo->colorAttachmentCount;
5235 
5236    result = anv_cmd_buffer_init_attachments(cmd_buffer, color_att_count);
5237    if (result != VK_SUCCESS)
5238       return;
5239 
5240    genX(flush_pipeline_select_3d)(cmd_buffer);
5241 
5242    UNUSED bool render_target_change = false;
5243    for (uint32_t i = 0; i < gfx->color_att_count; i++) {
5244       if (pRenderingInfo->pColorAttachments[i].imageView == VK_NULL_HANDLE) {
5245          render_target_change |= gfx->color_att[i].iview != NULL;
5246 
5247          gfx->color_att[i].vk_format = VK_FORMAT_UNDEFINED;
5248          gfx->color_att[i].iview = NULL;
5249          gfx->color_att[i].layout = VK_IMAGE_LAYOUT_UNDEFINED;
5250          gfx->color_att[i].aux_usage = ISL_AUX_USAGE_NONE;
5251          continue;
5252       }
5253 
5254       const VkRenderingAttachmentInfo *att =
5255          &pRenderingInfo->pColorAttachments[i];
5256       ANV_FROM_HANDLE(anv_image_view, iview, att->imageView);
5257       const VkImageLayout initial_layout = attachment_initial_layout(att);
5258 
5259       assert(render_area.offset.x + render_area.extent.width <=
5260              iview->vk.extent.width);
5261       assert(render_area.offset.y + render_area.extent.height <=
5262              iview->vk.extent.height);
5263       assert(layers <= iview->vk.layer_count);
5264 
5265       fb_size.w = MAX2(fb_size.w, iview->vk.extent.width);
5266       fb_size.h = MAX2(fb_size.h, iview->vk.extent.height);
5267 
5268       assert(gfx->samples == 0 || gfx->samples == iview->vk.image->samples);
5269       gfx->samples |= iview->vk.image->samples;
5270 
5271       enum isl_aux_usage aux_usage =
5272          anv_layout_to_aux_usage(cmd_buffer->device->info,
5273                                  iview->image,
5274                                  VK_IMAGE_ASPECT_COLOR_BIT,
5275                                  VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
5276                                  att->imageLayout,
5277                                  cmd_buffer->queue_family->queueFlags);
5278 
5279       render_target_change |= gfx->color_att[i].iview != iview;
5280 
5281       gfx->color_att[i].vk_format = iview->vk.format;
5282       gfx->color_att[i].iview = iview;
5283       gfx->color_att[i].layout = att->imageLayout;
5284       gfx->color_att[i].aux_usage = aux_usage;
5285 
5286       union isl_color_value fast_clear_color = { .u32 = { 0, } };
5287 
5288       if (att->loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR &&
5289           !(gfx->rendering_flags & VK_RENDERING_RESUMING_BIT)) {
5290          uint32_t clear_view_mask = pRenderingInfo->viewMask;
5291          VkClearRect clear_rect = {
5292             .rect = render_area,
5293             .baseArrayLayer = iview->vk.base_array_layer,
5294             .layerCount = layers,
5295          };
5296          const union isl_color_value clear_color =
5297             vk_to_isl_color_with_format(att->clearValue.color,
5298                                         iview->planes[0].isl.format);
5299 
5300          /* We only support fast-clears on the first layer */
5301          const bool fast_clear =
5302             (!is_multiview || (gfx->view_mask & 1)) &&
5303             anv_can_fast_clear_color(cmd_buffer, iview->image,
5304                                      iview->vk.base_mip_level,
5305                                      &clear_rect, att->imageLayout,
5306                                      iview->planes[0].isl.format,
5307                                      clear_color);
5308 
5309          if (att->imageLayout != initial_layout) {
5310             assert(render_area.offset.x == 0 && render_area.offset.y == 0 &&
5311                    render_area.extent.width == iview->vk.extent.width &&
5312                    render_area.extent.height == iview->vk.extent.height);
5313             if (is_multiview) {
5314                u_foreach_bit(view, gfx->view_mask) {
5315                   transition_color_buffer(cmd_buffer, iview->image,
5316                                           VK_IMAGE_ASPECT_COLOR_BIT,
5317                                           iview->vk.base_mip_level, 1,
5318                                           iview->vk.base_array_layer + view,
5319                                           1, /* layer_count */
5320                                           initial_layout, att->imageLayout,
5321                                           VK_QUEUE_FAMILY_IGNORED,
5322                                           VK_QUEUE_FAMILY_IGNORED,
5323                                           fast_clear);
5324                }
5325             } else {
5326                transition_color_buffer(cmd_buffer, iview->image,
5327                                        VK_IMAGE_ASPECT_COLOR_BIT,
5328                                        iview->vk.base_mip_level, 1,
5329                                        iview->vk.base_array_layer,
5330                                        gfx->layer_count,
5331                                        initial_layout, att->imageLayout,
5332                                        VK_QUEUE_FAMILY_IGNORED,
5333                                        VK_QUEUE_FAMILY_IGNORED,
5334                                        fast_clear);
5335             }
5336          }
5337 
5338          if (fast_clear) {
5339             /* We only support fast-clears on the first layer */
5340             assert(iview->vk.base_mip_level == 0 &&
5341                    iview->vk.base_array_layer == 0);
5342 
5343             fast_clear_color = clear_color;
5344 
5345             if (iview->image->vk.samples == 1) {
5346                anv_image_ccs_op(cmd_buffer, iview->image,
5347                                 iview->planes[0].isl.format,
5348                                 iview->planes[0].isl.swizzle,
5349                                 VK_IMAGE_ASPECT_COLOR_BIT,
5350                                 0, 0, 1, ISL_AUX_OP_FAST_CLEAR,
5351                                 &fast_clear_color,
5352                                 false);
5353             } else {
5354                anv_image_mcs_op(cmd_buffer, iview->image,
5355                                 iview->planes[0].isl.format,
5356                                 iview->planes[0].isl.swizzle,
5357                                 VK_IMAGE_ASPECT_COLOR_BIT,
5358                                 0, 1, ISL_AUX_OP_FAST_CLEAR,
5359                                 &fast_clear_color,
5360                                 false);
5361             }
5362             clear_view_mask &= ~1u;
5363             clear_rect.baseArrayLayer++;
5364             clear_rect.layerCount--;
5365 #if GFX_VER < 20
5366             genX(set_fast_clear_state)(cmd_buffer, iview->image,
5367                                        iview->planes[0].isl.format,
5368                                        iview->planes[0].isl.swizzle,
5369                                        clear_color);
5370 #endif
5371          }
5372 
5373          if (is_multiview) {
5374             u_foreach_bit(view, clear_view_mask) {
5375                anv_image_clear_color(cmd_buffer, iview->image,
5376                                      VK_IMAGE_ASPECT_COLOR_BIT,
5377                                      aux_usage,
5378                                      iview->planes[0].isl.format,
5379                                      iview->planes[0].isl.swizzle,
5380                                      iview->vk.base_mip_level,
5381                                      iview->vk.base_array_layer + view, 1,
5382                                      render_area, clear_color);
5383             }
5384          } else if (clear_rect.layerCount > 0) {
5385             anv_image_clear_color(cmd_buffer, iview->image,
5386                                   VK_IMAGE_ASPECT_COLOR_BIT,
5387                                   aux_usage,
5388                                   iview->planes[0].isl.format,
5389                                   iview->planes[0].isl.swizzle,
5390                                   iview->vk.base_mip_level,
5391                                   clear_rect.baseArrayLayer,
5392                                   clear_rect.layerCount,
5393                                   render_area, clear_color);
5394          }
5395       } else {
5396          /* If not LOAD_OP_CLEAR, we shouldn't have a layout transition. */
5397          assert(att->imageLayout == initial_layout);
5398       }
5399 
5400       struct isl_view isl_view = iview->planes[0].isl;
5401       if (pRenderingInfo->viewMask) {
5402          assert(isl_view.array_len >= util_last_bit(pRenderingInfo->viewMask));
5403          isl_view.array_len = util_last_bit(pRenderingInfo->viewMask);
5404       } else {
5405          assert(isl_view.array_len >= pRenderingInfo->layerCount);
5406          isl_view.array_len = pRenderingInfo->layerCount;
5407       }
5408 
5409       anv_image_fill_surface_state(cmd_buffer->device,
5410                                    iview->image,
5411                                    VK_IMAGE_ASPECT_COLOR_BIT,
5412                                    &isl_view,
5413                                    ISL_SURF_USAGE_RENDER_TARGET_BIT,
5414                                    aux_usage, &fast_clear_color,
5415                                    0, /* anv_image_view_state_flags */
5416                                    &gfx->color_att[i].surface_state);
5417 
5418       add_surface_state_relocs(cmd_buffer, &gfx->color_att[i].surface_state);
5419 
5420       if (GFX_VER < 10 &&
5421           (att->loadOp == VK_ATTACHMENT_LOAD_OP_LOAD ||
5422            render_area.extent.width != iview->vk.extent.width ||
5423            render_area.extent.height != iview->vk.extent.height ||
5424            (gfx->rendering_flags & VK_RENDERING_RESUMING_BIT)) &&
5425           iview->image->planes[0].aux_usage != ISL_AUX_USAGE_NONE &&
5426           iview->planes[0].isl.base_level == 0 &&
5427           iview->planes[0].isl.base_array_layer == 0) {
5428          struct anv_state surf_state = gfx->color_att[i].surface_state.state;
5429          genX(cmd_buffer_load_clear_color)(cmd_buffer, surf_state, iview);
5430       }
5431 
5432       if (att->resolveMode != VK_RESOLVE_MODE_NONE) {
5433          gfx->color_att[i].resolve_mode = att->resolveMode;
5434          gfx->color_att[i].resolve_iview =
5435             anv_image_view_from_handle(att->resolveImageView);
5436          gfx->color_att[i].resolve_layout = att->resolveImageLayout;
5437       }
5438    }
5439 
5440    anv_cmd_graphic_state_update_has_uint_rt(gfx);
5441 
5442    const struct anv_image_view *fsr_iview = NULL;
5443    const VkRenderingFragmentShadingRateAttachmentInfoKHR *fsr_att =
5444       vk_find_struct_const(pRenderingInfo->pNext,
5445                            RENDERING_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR);
5446    if (fsr_att != NULL && fsr_att->imageView != VK_NULL_HANDLE) {
5447       fsr_iview = anv_image_view_from_handle(fsr_att->imageView);
5448       /* imageLayout and shadingRateAttachmentTexelSize are ignored */
5449    }
5450 
5451    const struct anv_image_view *ds_iview = NULL;
5452    const VkRenderingAttachmentInfo *d_att = pRenderingInfo->pDepthAttachment;
5453    const VkRenderingAttachmentInfo *s_att = pRenderingInfo->pStencilAttachment;
5454    if ((d_att != NULL && d_att->imageView != VK_NULL_HANDLE) ||
5455        (s_att != NULL && s_att->imageView != VK_NULL_HANDLE)) {
5456       const struct anv_image_view *d_iview = NULL, *s_iview = NULL;
5457       VkImageLayout depth_layout = VK_IMAGE_LAYOUT_UNDEFINED;
5458       VkImageLayout stencil_layout = VK_IMAGE_LAYOUT_UNDEFINED;
5459       VkImageLayout initial_depth_layout = VK_IMAGE_LAYOUT_UNDEFINED;
5460       VkImageLayout initial_stencil_layout = VK_IMAGE_LAYOUT_UNDEFINED;
5461       enum isl_aux_usage depth_aux_usage = ISL_AUX_USAGE_NONE;
5462       enum isl_aux_usage stencil_aux_usage = ISL_AUX_USAGE_NONE;
5463       VkClearDepthStencilValue clear_value = {};
5464 
5465       if (d_att != NULL && d_att->imageView != VK_NULL_HANDLE) {
5466          d_iview = anv_image_view_from_handle(d_att->imageView);
5467          initial_depth_layout = attachment_initial_layout(d_att);
5468          depth_layout = d_att->imageLayout;
5469          depth_aux_usage =
5470             anv_layout_to_aux_usage(cmd_buffer->device->info,
5471                                     d_iview->image,
5472                                     VK_IMAGE_ASPECT_DEPTH_BIT,
5473                                     VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
5474                                     depth_layout,
5475                                     cmd_buffer->queue_family->queueFlags);
5476          clear_value.depth = d_att->clearValue.depthStencil.depth;
5477       }
5478 
5479       if (s_att != NULL && s_att->imageView != VK_NULL_HANDLE) {
5480          s_iview = anv_image_view_from_handle(s_att->imageView);
5481          initial_stencil_layout = attachment_initial_layout(s_att);
5482          stencil_layout = s_att->imageLayout;
5483          stencil_aux_usage =
5484             anv_layout_to_aux_usage(cmd_buffer->device->info,
5485                                     s_iview->image,
5486                                     VK_IMAGE_ASPECT_STENCIL_BIT,
5487                                     VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
5488                                     stencil_layout,
5489                                     cmd_buffer->queue_family->queueFlags);
5490          clear_value.stencil = s_att->clearValue.depthStencil.stencil;
5491       }
5492 
5493       assert(s_iview == NULL || d_iview == NULL || s_iview == d_iview);
5494       ds_iview = d_iview != NULL ? d_iview : s_iview;
5495       assert(ds_iview != NULL);
5496 
5497       assert(render_area.offset.x + render_area.extent.width <=
5498              ds_iview->vk.extent.width);
5499       assert(render_area.offset.y + render_area.extent.height <=
5500              ds_iview->vk.extent.height);
5501       assert(layers <= ds_iview->vk.layer_count);
5502 
5503       fb_size.w = MAX2(fb_size.w, ds_iview->vk.extent.width);
5504       fb_size.h = MAX2(fb_size.h, ds_iview->vk.extent.height);
5505 
5506       assert(gfx->samples == 0 || gfx->samples == ds_iview->vk.image->samples);
5507       gfx->samples |= ds_iview->vk.image->samples;
5508 
5509       VkImageAspectFlags clear_aspects = 0;
5510       if (d_iview != NULL && d_att->loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR &&
5511           !(gfx->rendering_flags & VK_RENDERING_RESUMING_BIT))
5512          clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
5513       if (s_iview != NULL && s_att->loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR &&
5514           !(gfx->rendering_flags & VK_RENDERING_RESUMING_BIT))
5515          clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
5516 
5517       if (clear_aspects != 0) {
5518          const bool hiz_clear =
5519             anv_can_hiz_clear_ds_view(cmd_buffer->device, d_iview,
5520                                       depth_layout, clear_aspects,
5521                                       clear_value.depth,
5522                                       render_area,
5523                                       cmd_buffer->queue_family->queueFlags);
5524 
5525          if (depth_layout != initial_depth_layout) {
5526             assert(render_area.offset.x == 0 && render_area.offset.y == 0 &&
5527                    render_area.extent.width == d_iview->vk.extent.width &&
5528                    render_area.extent.height == d_iview->vk.extent.height);
5529 
5530             if (is_multiview) {
5531                u_foreach_bit(view, gfx->view_mask) {
5532                   transition_depth_buffer(cmd_buffer, d_iview->image,
5533                                           d_iview->vk.base_mip_level, 1,
5534                                           d_iview->vk.base_array_layer + view,
5535                                           1 /* layer_count */,
5536                                           initial_depth_layout, depth_layout,
5537                                           hiz_clear);
5538                }
5539             } else {
5540                transition_depth_buffer(cmd_buffer, d_iview->image,
5541                                        d_iview->vk.base_mip_level, 1,
5542                                        d_iview->vk.base_array_layer,
5543                                        gfx->layer_count,
5544                                        initial_depth_layout, depth_layout,
5545                                        hiz_clear);
5546             }
5547          }
5548 
5549          if (stencil_layout != initial_stencil_layout) {
5550             assert(render_area.offset.x == 0 && render_area.offset.y == 0 &&
5551                    render_area.extent.width == s_iview->vk.extent.width &&
5552                    render_area.extent.height == s_iview->vk.extent.height);
5553 
5554             if (is_multiview) {
5555                u_foreach_bit(view, gfx->view_mask) {
5556                   transition_stencil_buffer(cmd_buffer, s_iview->image,
5557                                             s_iview->vk.base_mip_level, 1,
5558                                             s_iview->vk.base_array_layer + view,
5559                                             1 /* layer_count */,
5560                                             initial_stencil_layout,
5561                                             stencil_layout,
5562                                             hiz_clear);
5563                }
5564             } else {
5565                transition_stencil_buffer(cmd_buffer, s_iview->image,
5566                                          s_iview->vk.base_mip_level, 1,
5567                                          s_iview->vk.base_array_layer,
5568                                          gfx->layer_count,
5569                                          initial_stencil_layout,
5570                                          stencil_layout,
5571                                          hiz_clear);
5572             }
5573          }
5574 
5575          if (is_multiview) {
5576             u_foreach_bit(view, gfx->view_mask) {
5577                uint32_t level = ds_iview->vk.base_mip_level;
5578                uint32_t layer = ds_iview->vk.base_array_layer + view;
5579 
5580                if (hiz_clear) {
5581                   anv_image_hiz_clear(cmd_buffer, ds_iview->image,
5582                                       clear_aspects,
5583                                       level, layer, 1,
5584                                       render_area, &clear_value);
5585                } else {
5586                   anv_image_clear_depth_stencil(cmd_buffer, ds_iview->image,
5587                                                 clear_aspects,
5588                                                 depth_aux_usage,
5589                                                 level, layer, 1,
5590                                                 render_area, &clear_value);
5591                }
5592             }
5593          } else {
5594             uint32_t level = ds_iview->vk.base_mip_level;
5595             uint32_t base_layer = ds_iview->vk.base_array_layer;
5596             uint32_t layer_count = gfx->layer_count;
5597 
5598             if (hiz_clear) {
5599                anv_image_hiz_clear(cmd_buffer, ds_iview->image,
5600                                    clear_aspects,
5601                                    level, base_layer, layer_count,
5602                                    render_area, &clear_value);
5603             } else {
5604                anv_image_clear_depth_stencil(cmd_buffer, ds_iview->image,
5605                                              clear_aspects,
5606                                              depth_aux_usage,
5607                                              level, base_layer, layer_count,
5608                                              render_area, &clear_value);
5609             }
5610          }
5611       } else {
5612          /* If not LOAD_OP_CLEAR, we shouldn't have a layout transition. */
5613          assert(depth_layout == initial_depth_layout);
5614          assert(stencil_layout == initial_stencil_layout);
5615       }
5616 
5617       if (d_iview != NULL) {
5618          gfx->depth_att.vk_format = d_iview->vk.format;
5619          gfx->depth_att.iview = d_iview;
5620          gfx->depth_att.layout = depth_layout;
5621          gfx->depth_att.aux_usage = depth_aux_usage;
5622          if (d_att != NULL && d_att->resolveMode != VK_RESOLVE_MODE_NONE) {
5623             assert(d_att->resolveImageView != VK_NULL_HANDLE);
5624             gfx->depth_att.resolve_mode = d_att->resolveMode;
5625             gfx->depth_att.resolve_iview =
5626                anv_image_view_from_handle(d_att->resolveImageView);
5627             gfx->depth_att.resolve_layout = d_att->resolveImageLayout;
5628          }
5629       }
5630 
5631       if (s_iview != NULL) {
5632          gfx->stencil_att.vk_format = s_iview->vk.format;
5633          gfx->stencil_att.iview = s_iview;
5634          gfx->stencil_att.layout = stencil_layout;
5635          gfx->stencil_att.aux_usage = stencil_aux_usage;
5636          if (s_att->resolveMode != VK_RESOLVE_MODE_NONE) {
5637             assert(s_att->resolveImageView != VK_NULL_HANDLE);
5638             gfx->stencil_att.resolve_mode = s_att->resolveMode;
5639             gfx->stencil_att.resolve_iview =
5640                anv_image_view_from_handle(s_att->resolveImageView);
5641             gfx->stencil_att.resolve_layout = s_att->resolveImageLayout;
5642          }
5643       }
5644    }
5645 
5646    /* Finally, now that we know the right size, set up the null surface */
5647    assert(util_bitcount(gfx->samples) <= 1);
5648    isl_null_fill_state(&cmd_buffer->device->isl_dev,
5649                        gfx->null_surface_state.map,
5650                        .size = fb_size);
5651 
5652    for (uint32_t i = 0; i < gfx->color_att_count; i++) {
5653       if (pRenderingInfo->pColorAttachments[i].imageView != VK_NULL_HANDLE)
5654          continue;
5655 
5656       isl_null_fill_state(&cmd_buffer->device->isl_dev,
5657                           gfx->color_att[i].surface_state.state.map,
5658                           .size = fb_size);
5659    }
5660 
5661    /****** We can now start emitting code to begin the render pass ******/
5662 
5663    gfx->dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;
5664 
5665    /* It is possible to start a render pass with an old pipeline.  Because the
5666     * render pass and subpass index are both baked into the pipeline, this is
5667     * highly unlikely.  In order to do so, it requires that you have a render
5668     * pass with a single subpass and that you use that render pass twice
5669     * back-to-back and use the same pipeline at the start of the second render
5670     * pass as at the end of the first.  In order to avoid unpredictable issues
5671     * with this edge case, we just dirty the pipeline at the start of every
5672     * subpass.
5673     */
5674    gfx->dirty |= ANV_CMD_DIRTY_PIPELINE;
5675 
5676 #if GFX_VER >= 11
5677    if (render_target_change) {
5678       /* The PIPE_CONTROL command description says:
5679        *
5680        *    "Whenever a Binding Table Index (BTI) used by a Render Target Message
5681        *     points to a different RENDER_SURFACE_STATE, SW must issue a Render
5682        *     Target Cache Flush by enabling this bit. When render target flush
5683        *     is set due to new association of BTI, PS Scoreboard Stall bit must
5684        *     be set in this packet."
5685        *
5686        * We assume that a new BeginRendering is always changing the RTs, which
5687        * may not be true and cause excessive flushing.  We can trivially skip it
5688        * in the case that there are no RTs (depth-only rendering), though.
5689        */
5690       anv_add_pending_pipe_bits(cmd_buffer,
5691                               ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
5692                               ANV_PIPE_STALL_AT_SCOREBOARD_BIT,
5693                               "change RT");
5694    }
5695 #endif
5696 
5697    cmd_buffer_emit_depth_stencil(cmd_buffer);
5698 
5699    cmd_buffer_emit_cps_control_buffer(cmd_buffer, fsr_iview);
5700 }
5701 
5702 static void
5703 cmd_buffer_mark_attachment_written(struct anv_cmd_buffer *cmd_buffer,
5704                                    struct anv_attachment *att,
5705                                    VkImageAspectFlagBits aspect)
5706 {
5707 #if GFX_VER < 20
5708    struct anv_cmd_graphics_state *gfx = &cmd_buffer->state.gfx;
5709    const struct anv_image_view *iview = att->iview;
5710 
5711    if (iview == NULL)
5712       return;
5713 
5714    if (gfx->view_mask == 0) {
5715       genX(cmd_buffer_mark_image_written)(cmd_buffer, iview->image,
5716                                           aspect, att->aux_usage,
5717                                           iview->planes[0].isl.base_level,
5718                                           iview->planes[0].isl.base_array_layer,
5719                                           gfx->layer_count);
5720    } else {
5721       uint32_t res_view_mask = gfx->view_mask;
5722       while (res_view_mask) {
5723          int i = u_bit_scan(&res_view_mask);
5724 
5725          const uint32_t level = iview->planes[0].isl.base_level;
5726          const uint32_t layer = iview->planes[0].isl.base_array_layer + i;
5727 
5728          genX(cmd_buffer_mark_image_written)(cmd_buffer, iview->image,
5729                                              aspect, att->aux_usage,
5730                                              level, layer, 1);
5731       }
5732    }
5733 #endif
5734 }
5735 
5736 void genX(CmdEndRendering)(
5737     VkCommandBuffer                             commandBuffer)
5738 {
5739    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
5740    struct anv_cmd_graphics_state *gfx = &cmd_buffer->state.gfx;
5741 
5742    if (anv_batch_has_error(&cmd_buffer->batch))
5743       return;
5744 
5745    const bool is_multiview = gfx->view_mask != 0;
5746    const uint32_t layers =
5747       is_multiview ? util_last_bit(gfx->view_mask) : gfx->layer_count;
5748 
5749    for (uint32_t i = 0; i < gfx->color_att_count; i++) {
5750       cmd_buffer_mark_attachment_written(cmd_buffer, &gfx->color_att[i],
5751                                          VK_IMAGE_ASPECT_COLOR_BIT);
5752    }
5753 
5754    cmd_buffer_mark_attachment_written(cmd_buffer, &gfx->depth_att,
5755                                        VK_IMAGE_ASPECT_DEPTH_BIT);
5756 
5757    cmd_buffer_mark_attachment_written(cmd_buffer, &gfx->stencil_att,
5758                                        VK_IMAGE_ASPECT_STENCIL_BIT);
5759 
5760 
5761    if (!(gfx->rendering_flags & VK_RENDERING_SUSPENDING_BIT)) {
5762       bool has_color_resolve = false;
5763       UNUSED bool has_sparse_color_resolve = false;
5764 
5765       for (uint32_t i = 0; i < gfx->color_att_count; i++) {
5766          if (gfx->color_att[i].resolve_mode != VK_RESOLVE_MODE_NONE) {
5767             has_color_resolve = true;
5768             has_sparse_color_resolve |=
5769                anv_image_is_sparse(gfx->color_att[i].iview->image);
5770          }
5771       }
5772 
5773       if (has_color_resolve) {
5774          /* We are about to do some MSAA resolves.  We need to flush so that
5775           * the result of writes to the MSAA color attachments show up in the
5776           * sampler when we blit to the single-sampled resolve target.
5777           */
5778          anv_add_pending_pipe_bits(cmd_buffer,
5779                                    ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
5780                                    ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT,
5781                                    "MSAA resolve");
5782       }
5783 
5784       const bool has_depth_resolve =
5785          gfx->depth_att.resolve_mode != VK_RESOLVE_MODE_NONE;
5786       const bool has_stencil_resolve =
5787          gfx->stencil_att.resolve_mode != VK_RESOLVE_MODE_NONE;
5788 
5789       if (has_depth_resolve || has_stencil_resolve) {
5790          /* We are about to do some MSAA resolves.  We need to flush so that
5791           * the result of writes to the MSAA depth attachments show up in the
5792           * sampler when we blit to the single-sampled resolve target.
5793           */
5794          anv_add_pending_pipe_bits(cmd_buffer,
5795                                  ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
5796                                  ANV_PIPE_DEPTH_CACHE_FLUSH_BIT,
5797                                  "MSAA resolve");
5798       }
5799 
5800 #if GFX_VER < 20
5801       const bool has_sparse_depth_resolve =
5802          has_depth_resolve &&
5803          anv_image_is_sparse(gfx->depth_att.iview->image);
5804       const bool has_sparse_stencil_resolve =
5805          has_stencil_resolve &&
5806          anv_image_is_sparse(gfx->stencil_att.iview->image);
5807       /* Our HW implementation of the sparse feature prior to Xe2 lives in the
5808        * GAM unit (interface between all the GPU caches and external memory).
5809        * As a result writes to NULL bound images & buffers that should be
5810        * ignored are actually still visible in the caches. The only way for us
5811        * to get correct NULL bound regions to return 0s is to evict the caches
5812        * to force the caches to be repopulated with 0s.
5813        *
5814        * Our understanding is that Xe2 started to tag the L3 cache with some
5815        * kind of physical address information instead. It is therefore able to
5816        * detect that a cache line is backed by a null tile, so the L3 cache
5817        * also has a sparse-compatible behavior and we don't need to flush
5818        * anymore.
5819        */
5820       if (has_sparse_color_resolve || has_sparse_depth_resolve ||
5821           has_sparse_stencil_resolve) {
5822          /* If the resolve image is sparse we need some extra bits to make
5823           * sure unbound regions read 0, as residencyNonResidentStrict
5824           * mandates.
5825           */
5826          anv_add_pending_pipe_bits(cmd_buffer, ANV_PIPE_TILE_CACHE_FLUSH_BIT,
5827                                    "sparse MSAA resolve");
5828       }
5829 #endif
5830 
5831       for (uint32_t i = 0; i < gfx->color_att_count; i++) {
5832          const struct anv_attachment *att = &gfx->color_att[i];
5833          if (att->resolve_mode == VK_RESOLVE_MODE_NONE)
5834             continue;
5835 
5836          anv_attachment_msaa_resolve(cmd_buffer, att, att->layout,
5837                                      VK_IMAGE_ASPECT_COLOR_BIT);
5838       }
5839 
5840       if (has_depth_resolve) {
5841          const struct anv_image_view *src_iview = gfx->depth_att.iview;
5842 
5843          /* MSAA resolves sample from the source attachment.  Transition the
5844           * depth attachment first to get rid of any HiZ that we may not be
5845           * able to handle.
5846           */
5847          transition_depth_buffer(cmd_buffer, src_iview->image, 0, 1,
5848                                  src_iview->planes[0].isl.base_array_layer,
5849                                  layers,
5850                                  gfx->depth_att.layout,
5851                                  VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
5852                                  false /* will_full_fast_clear */);
5853 
5854          anv_attachment_msaa_resolve(cmd_buffer, &gfx->depth_att,
5855                                      VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
5856                                      VK_IMAGE_ASPECT_DEPTH_BIT);
5857 
5858          /* Transition the source back to the original layout.  This seems a
5859           * bit inefficient but, since HiZ resolves aren't destructive, going
5860           * from less HiZ to more is generally a no-op.
5861           */
5862          transition_depth_buffer(cmd_buffer, src_iview->image, 0, 1,
5863                                  src_iview->planes[0].isl.base_array_layer,
5864                                  layers,
5865                                  VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
5866                                  gfx->depth_att.layout,
5867                                  false /* will_full_fast_clear */);
5868       }
5869 
5870       if (has_stencil_resolve) {
5871          anv_attachment_msaa_resolve(cmd_buffer, &gfx->stencil_att,
5872                                      gfx->stencil_att.layout,
5873                                      VK_IMAGE_ASPECT_STENCIL_BIT);
5874       }
5875    }
5876 
5877    trace_intel_end_render_pass(&cmd_buffer->trace,
5878                                gfx->render_area.extent.width,
5879                                gfx->render_area.extent.height,
5880                                gfx->color_att_count,
5881                                gfx->samples);
5882 
5883    anv_cmd_buffer_reset_rendering(cmd_buffer);
5884 }
5885 
5886 void
5887 genX(cmd_emit_conditional_render_predicate)(struct anv_cmd_buffer *cmd_buffer)
5888 {
5889    struct mi_builder b;
5890    mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
5891 
5892    mi_store(&b, mi_reg64(MI_PREDICATE_SRC0),
5893                 mi_reg32(ANV_PREDICATE_RESULT_REG));
5894    mi_store(&b, mi_reg64(MI_PREDICATE_SRC1), mi_imm(0));
5895 
5896    anv_batch_emit(&cmd_buffer->batch, GENX(MI_PREDICATE), mip) {
5897       mip.LoadOperation    = LOAD_LOADINV;
5898       mip.CombineOperation = COMBINE_SET;
5899       mip.CompareOperation = COMPARE_SRCS_EQUAL;
5900    }
5901 }
5902 
5903 void genX(CmdBeginConditionalRenderingEXT)(
5904    VkCommandBuffer                             commandBuffer,
5905    const VkConditionalRenderingBeginInfoEXT*   pConditionalRenderingBegin)
5906 {
5907    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
5908    ANV_FROM_HANDLE(anv_buffer, buffer, pConditionalRenderingBegin->buffer);
5909    struct anv_cmd_state *cmd_state = &cmd_buffer->state;
5910    struct anv_address value_address =
5911       anv_address_add(buffer->address, pConditionalRenderingBegin->offset);
5912 
5913    const bool isInverted = pConditionalRenderingBegin->flags &
5914                            VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT;
5915 
5916    cmd_state->conditional_render_enabled = true;
5917 
5918    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
5919 
5920    struct mi_builder b;
5921    mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
5922    const uint32_t mocs = anv_mocs_for_address(cmd_buffer->device, &value_address);
5923    mi_builder_set_mocs(&b, mocs);
5924 
5925    /* Section 19.4 of the Vulkan 1.1.85 spec says:
5926     *
5927     *    If the value of the predicate in buffer memory changes
5928     *    while conditional rendering is active, the rendering commands
5929     *    may be discarded in an implementation-dependent way.
5930     *    Some implementations may latch the value of the predicate
5931     *    upon beginning conditional rendering while others
5932     *    may read it before every rendering command.
5933     *
5934     * So it's perfectly fine to read a value from the buffer once.
5935     */
5936    struct mi_value value =  mi_mem32(value_address);
5937 
5938    /* Precompute the predicate result; this is necessary to support
5939     * secondary command buffers since it is unknown whether conditional
5940     * rendering is inverted when they are populated.
5941     */
5942    mi_store(&b, mi_reg64(ANV_PREDICATE_RESULT_REG),
5943                 isInverted ? mi_uge(&b, mi_imm(0), value) :
5944                              mi_ult(&b, mi_imm(0), value));
5945 }
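
/* CPU-side sketch (hypothetical helper, "#if 0" only) of the value the MI
 * commands above leave in ANV_PREDICATE_RESULT_REG: a "should we draw?"
 * boolean with the inversion already folded in, so secondary command
 * buffers can consume it without knowing the original flags.
 */
#if 0
static bool
example_predicate_result(uint32_t value_in_buffer, bool inverted)
{
   /* mi_ult(0, value) is "0 < value", i.e. value != 0;
    * mi_uge(0, value) is "0 >= value", i.e. value == 0.
    */
   return inverted ? (value_in_buffer == 0) : (value_in_buffer != 0);
}
#endif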
5946 
5947 void genX(CmdEndConditionalRenderingEXT)(
5948     VkCommandBuffer                             commandBuffer)
5949 {
5950    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
5951    struct anv_cmd_state *cmd_state = &cmd_buffer->state;
5952 
5953    cmd_state->conditional_render_enabled = false;
5954 }
5955 
5956 /* Set of stage bits which are pipelined, i.e. they get queued
5957  * by the command streamer for later execution.
5958  */
5959 #define ANV_PIPELINE_STAGE_PIPELINED_BITS \
5960    ~(VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT | \
5961      VK_PIPELINE_STAGE_2_DRAW_INDIRECT_BIT | \
5962      VK_PIPELINE_STAGE_2_HOST_BIT | \
5963      VK_PIPELINE_STAGE_2_CONDITIONAL_RENDERING_BIT_EXT)
5964 
5965 void genX(CmdSetEvent2)(
5966     VkCommandBuffer                             commandBuffer,
5967     VkEvent                                     _event,
5968     const VkDependencyInfo*                     pDependencyInfo)
5969 {
5970    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
5971    ANV_FROM_HANDLE(anv_event, event, _event);
5972 
5973    switch (cmd_buffer->batch.engine_class) {
5974    case INTEL_ENGINE_CLASS_VIDEO:
5975    case INTEL_ENGINE_CLASS_COPY:
5976       anv_batch_emit(&cmd_buffer->batch, GENX(MI_FLUSH_DW), flush) {
5977          flush.PostSyncOperation = WriteImmediateData;
5978          flush.Address = anv_state_pool_state_address(
5979             &cmd_buffer->device->dynamic_state_pool,
5980             event->state);
5981          flush.ImmediateData = VK_EVENT_SET;
5982       }
5983       break;
5984 
5985    case INTEL_ENGINE_CLASS_RENDER:
5986    case INTEL_ENGINE_CLASS_COMPUTE: {
5987       VkPipelineStageFlags2 src_stages = 0;
5988 
5989       for (uint32_t i = 0; i < pDependencyInfo->memoryBarrierCount; i++)
5990          src_stages |= pDependencyInfo->pMemoryBarriers[i].srcStageMask;
5991       for (uint32_t i = 0; i < pDependencyInfo->bufferMemoryBarrierCount; i++)
5992          src_stages |= pDependencyInfo->pBufferMemoryBarriers[i].srcStageMask;
5993       for (uint32_t i = 0; i < pDependencyInfo->imageMemoryBarrierCount; i++)
5994          src_stages |= pDependencyInfo->pImageMemoryBarriers[i].srcStageMask;
5995 
5996       cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_POST_SYNC_BIT;
5997       genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
5998 
5999       enum anv_pipe_bits pc_bits = 0;
6000       if (src_stages & ANV_PIPELINE_STAGE_PIPELINED_BITS) {
6001          pc_bits |= ANV_PIPE_STALL_AT_SCOREBOARD_BIT;
6002          pc_bits |= ANV_PIPE_CS_STALL_BIT;
6003       }
6004 
6005       genx_batch_emit_pipe_control_write
6006          (&cmd_buffer->batch, cmd_buffer->device->info,
6007           cmd_buffer->state.current_pipeline, WriteImmediateData,
6008           anv_state_pool_state_address(&cmd_buffer->device->dynamic_state_pool,
6009                                        event->state),
6010           VK_EVENT_SET, pc_bits);
6011       break;
6012    }
6013 
6014    default:
6015       unreachable("Invalid engine class");
6016    }
6017 }
6018 
6019 void genX(CmdResetEvent2)(
6020     VkCommandBuffer                             commandBuffer,
6021     VkEvent                                     _event,
6022     VkPipelineStageFlags2                       stageMask)
6023 {
6024    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
6025    ANV_FROM_HANDLE(anv_event, event, _event);
6026 
6027    switch (cmd_buffer->batch.engine_class) {
6028    case INTEL_ENGINE_CLASS_VIDEO:
6029    case INTEL_ENGINE_CLASS_COPY:
6030       anv_batch_emit(&cmd_buffer->batch, GENX(MI_FLUSH_DW), flush) {
6031          flush.PostSyncOperation = WriteImmediateData;
6032          flush.Address = anv_state_pool_state_address(
6033             &cmd_buffer->device->dynamic_state_pool,
6034             event->state);
6035          flush.ImmediateData = VK_EVENT_RESET;
6036       }
6037       break;
6038 
6039    case INTEL_ENGINE_CLASS_RENDER:
6040    case INTEL_ENGINE_CLASS_COMPUTE: {
6041       cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_POST_SYNC_BIT;
6042       genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
6043 
6044       enum anv_pipe_bits pc_bits = 0;
6045       if (stageMask & ANV_PIPELINE_STAGE_PIPELINED_BITS) {
6046          pc_bits |= ANV_PIPE_STALL_AT_SCOREBOARD_BIT;
6047          pc_bits |= ANV_PIPE_CS_STALL_BIT;
6048       }
6049 
6050       genx_batch_emit_pipe_control_write
6051          (&cmd_buffer->batch, cmd_buffer->device->info,
6052           cmd_buffer->state.current_pipeline, WriteImmediateData,
6053           anv_state_pool_state_address(&cmd_buffer->device->dynamic_state_pool,
6054                                        event->state),
6055           VK_EVENT_RESET,
6056           pc_bits);
6057       break;
6058    }
6059 
6060    default:
6061       unreachable("Invalid engine class");
6062    }
6063 }
6064 
6065 void genX(CmdWaitEvents2)(
6066     VkCommandBuffer                             commandBuffer,
6067     uint32_t                                    eventCount,
6068     const VkEvent*                              pEvents,
6069     const VkDependencyInfo*                     pDependencyInfos)
6070 {
6071    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
6072 
6073    for (uint32_t i = 0; i < eventCount; i++) {
6074       ANV_FROM_HANDLE(anv_event, event, pEvents[i]);
6075 
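      /* Busy-wait on the event's dword in the dynamic state pool: the
       * semaphore wait polls until the value equals VK_EVENT_SET, which is
       * what CmdSetEvent2 writes through its post-sync operation.
       */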
6076       anv_batch_emit(&cmd_buffer->batch, GENX(MI_SEMAPHORE_WAIT), sem) {
6077          sem.WaitMode            = PollingMode;
6078          sem.CompareOperation    = COMPARE_SAD_EQUAL_SDD;
6079          sem.SemaphoreDataDword  = VK_EVENT_SET;
6080          sem.SemaphoreAddress    = anv_state_pool_state_address(
6081             &cmd_buffer->device->dynamic_state_pool,
6082             event->state);
6083       }
6084    }
6085 
6086    cmd_buffer_barrier(cmd_buffer, eventCount, pDependencyInfos, "wait event");
6087 }
6088 
6089 static uint32_t vk_to_intel_index_type(VkIndexType type)
6090 {
6091    switch (type) {
6092    case VK_INDEX_TYPE_UINT8_KHR:
6093       return INDEX_BYTE;
6094    case VK_INDEX_TYPE_UINT16:
6095       return INDEX_WORD;
6096    case VK_INDEX_TYPE_UINT32:
6097       return INDEX_DWORD;
6098    default:
6099       unreachable("invalid index type");
6100    }
6101 }
6102 
6103 void genX(CmdBindIndexBuffer2KHR)(
6104     VkCommandBuffer                             commandBuffer,
6105     VkBuffer                                    _buffer,
6106     VkDeviceSize                                offset,
6107     VkDeviceSize                                size,
6108     VkIndexType                                 indexType)
6109 {
6110    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
6111    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
6112 
6113    uint32_t restart_index = vk_index_to_restart(indexType);
6114    if (cmd_buffer->state.gfx.restart_index != restart_index) {
6115       cmd_buffer->state.gfx.restart_index = restart_index;
6116       cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_RESTART_INDEX;
6117    }
6118 
6119    uint32_t index_size = buffer ? vk_buffer_range(&buffer->vk, offset, size) : 0;
6120    uint32_t index_type = vk_to_intel_index_type(indexType);
6121    if (cmd_buffer->state.gfx.index_buffer != buffer ||
6122        cmd_buffer->state.gfx.index_type != index_type ||
6123        cmd_buffer->state.gfx.index_offset != offset ||
6124        cmd_buffer->state.gfx.index_size != index_size) {
6125       cmd_buffer->state.gfx.index_buffer = buffer;
6126       cmd_buffer->state.gfx.index_type = index_type;
6127       cmd_buffer->state.gfx.index_offset = offset;
6128       cmd_buffer->state.gfx.index_size = index_size;
6129       cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
6130    }
6131 }
6132 
6133 VkResult genX(CmdSetPerformanceOverrideINTEL)(
6134     VkCommandBuffer                             commandBuffer,
6135     const VkPerformanceOverrideInfoINTEL*       pOverrideInfo)
6136 {
6137    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
6138 
6139    switch (pOverrideInfo->type) {
6140    case VK_PERFORMANCE_OVERRIDE_TYPE_NULL_HARDWARE_INTEL: {
6141       anv_batch_write_reg(&cmd_buffer->batch, GENX(CS_DEBUG_MODE2), csdm2) {
6142          csdm2._3DRenderingInstructionDisable = pOverrideInfo->enable;
6143          csdm2.MediaInstructionDisable = pOverrideInfo->enable;
6144          csdm2._3DRenderingInstructionDisableMask = true;
6145          csdm2.MediaInstructionDisableMask = true;
6146       }
6147       break;
6148    }
6149 
6150    case VK_PERFORMANCE_OVERRIDE_TYPE_FLUSH_GPU_CACHES_INTEL:
6151       if (pOverrideInfo->enable) {
6152          /* FLUSH ALL THE THINGS! As requested by the MDAPI team. */
6153          anv_add_pending_pipe_bits(cmd_buffer,
6154                                    ANV_PIPE_BARRIER_FLUSH_BITS |
6155                                    ANV_PIPE_INVALIDATE_BITS,
6156                                    "perf counter isolation");
6157          genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
6158       }
6159       break;
6160 
6161    default:
6162       unreachable("Invalid override");
6163    }
6164 
6165    return VK_SUCCESS;
6166 }
6167 
6168 VkResult genX(CmdSetPerformanceStreamMarkerINTEL)(
6169     VkCommandBuffer                             commandBuffer,
6170     const VkPerformanceStreamMarkerInfoINTEL*   pMarkerInfo)
6171 {
6172    /* TODO: Wait on the register write; this might depend on the generation. */
6173 
6174    return VK_SUCCESS;
6175 }
6176 
6177 #define TIMESTAMP 0x2358
6178 
6179 void genX(cmd_emit_timestamp)(struct anv_batch *batch,
6180                               struct anv_device *device,
6181                               struct anv_address addr,
6182                               enum anv_timestamp_capture_type type,
6183                               void *data) {
6184    /* Make sure the ANV_TIMESTAMP_CAPTURE_AT_CS_STALL and
6185     * ANV_TIMESTAMP_REWRITE_COMPUTE_WALKER capture types are not used on
6186     * the copy/video (transfer) queues.
6187     */
6188    if ((batch->engine_class == INTEL_ENGINE_CLASS_COPY) ||
6189        (batch->engine_class == INTEL_ENGINE_CLASS_VIDEO)) {
6190       assert(type != ANV_TIMESTAMP_CAPTURE_AT_CS_STALL &&
6191              type != ANV_TIMESTAMP_REWRITE_COMPUTE_WALKER);
6192    }
6193 
6194    switch (type) {
6195    case ANV_TIMESTAMP_CAPTURE_TOP_OF_PIPE: {
6196       struct mi_builder b;
6197       mi_builder_init(&b, device->info, batch);
6198       mi_store(&b, mi_mem64(addr), mi_reg64(TIMESTAMP));
6199       break;
6200    }
6201 
6202    case ANV_TIMESTAMP_CAPTURE_END_OF_PIPE: {
6203       if ((batch->engine_class == INTEL_ENGINE_CLASS_COPY) ||
6204           (batch->engine_class == INTEL_ENGINE_CLASS_VIDEO)) {
6205          /* Wa_16018063123 - emit fast color dummy blit before MI_FLUSH_DW. */
6206          if (intel_needs_workaround(device->info, 16018063123))
6207             genX(batch_emit_fast_color_dummy_blit)(batch, device);
6208          anv_batch_emit(batch, GENX(MI_FLUSH_DW), fd) {
6209             fd.PostSyncOperation = WriteTimestamp;
6210             fd.Address = addr;
6211          }
6212       } else {
6213          genx_batch_emit_pipe_control_write(batch, device->info, 0,
6214                                             WriteTimestamp, addr, 0, 0);
6215       }
6216       break;
6217    }
6218 
6219    case ANV_TIMESTAMP_CAPTURE_AT_CS_STALL:
6220       genx_batch_emit_pipe_control_write
6221            (batch, device->info, 0, WriteTimestamp, addr, 0,
6222             ANV_PIPE_CS_STALL_BIT);
6223       break;
6224 
6225 #if GFX_VERx10 >= 125
6226    case ANV_TIMESTAMP_REWRITE_COMPUTE_WALKER: {
6227       uint32_t dwords[GENX(COMPUTE_WALKER_length)];
6228 
6229       GENX(COMPUTE_WALKER_pack)(batch, dwords, &(struct GENX(COMPUTE_WALKER)) {
6230             .body = {
6231                .PostSync = (struct GENX(POSTSYNC_DATA)) {
6232                   .Operation = WriteTimestamp,
6233                   .DestinationAddress = addr,
6234                   .MOCS = anv_mocs(device, NULL, 0),
6235                },
6236             }
6237          });
6238 
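      /* OR the packed post-sync fields into the caller-provided
       * COMPUTE_WALKER dwords (data); dwords that are zero in this template
       * leave the original walker programming untouched.
       */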
6239       for (uint32_t i = 0; i < ARRAY_SIZE(dwords); i++) {
6240          if (dwords[i])
6241             ((uint32_t *)data)[i] |= dwords[i];
6242       }
6243       break;
6244    }
6245 
6246    case ANV_TIMESTAMP_REWRITE_INDIRECT_DISPATCH: {
6247       uint32_t dwords[GENX(EXECUTE_INDIRECT_DISPATCH_length)];
6248 
6249       GENX(EXECUTE_INDIRECT_DISPATCH_pack)
6250       (batch, dwords, &(struct GENX(EXECUTE_INDIRECT_DISPATCH)) {
6251             .MOCS = anv_mocs(device, NULL, 0),
6252             .COMPUTE_WALKER_BODY = {
6253                .PostSync = (struct GENX(POSTSYNC_DATA)) {
6254                   .Operation = WriteTimestamp,
6255                   .DestinationAddress = addr,
6256                   .MOCS = anv_mocs(device, NULL, 0),
6257                },
6258             }
6259       });
6260 
6261       for (uint32_t i = 0; i < ARRAY_SIZE(dwords); i++) {
6262          if (dwords[i])
6263             ((uint32_t *)data)[i] |= dwords[i];
6264       }
6265       break;
6266    }
6267 #endif
6268 
6269    default:
6270       unreachable("invalid");
6271    }
6272 }
6273 
6274 void genX(cmd_capture_data)(struct anv_batch *batch,
6275                             struct anv_device *device,
6276                             struct anv_address dst_addr,
6277                             struct anv_address src_addr,
6278                             uint32_t size_B) {
6279    struct mi_builder b;
6280    mi_builder_init(&b, device->info, batch);
6281    mi_builder_set_mocs(&b, isl_mocs(&device->isl_dev, 0, false));
6282    mi_memcpy(&b, dst_addr, src_addr, size_B);
6283 }
6284 
6285 void genX(batch_emit_secondary_call)(struct anv_batch *batch,
6286                                      struct anv_device *device,
6287                                      struct anv_address secondary_addr,
6288                                      struct anv_address secondary_return_addr)
6289 {
6290    struct mi_builder b;
6291    mi_builder_init(&b, device->info, batch);
6292    mi_builder_set_mocs(&b, isl_mocs(&device->isl_dev, 0, false));
6293    /* Make sure the write in the batch buffer lands before we execute the
6294     * jump.
6295     */
6296    mi_builder_set_write_check(&b, true);
6297 
6298    /* Emit a write to change the return address of the secondary */
6299    struct mi_reloc_imm_token reloc =
6300       mi_store_relocated_imm(&b, mi_mem64(secondary_return_addr));
6301 
6302    /* Ensure the write has landed before the CS reads the address written
6303     * above.
6304     */
6305    mi_ensure_write_fence(&b);
6306 
6307 #if GFX_VER >= 12
6308    /* Disable prefetcher before jumping into a secondary */
6309    anv_batch_emit(batch, GENX(MI_ARB_CHECK), arb) {
6310       arb.PreParserDisableMask = true;
6311       arb.PreParserDisable = true;
6312    }
6313 #endif
6314 
6315    /* Jump into the secondary */
6316    anv_batch_emit(batch, GENX(MI_BATCH_BUFFER_START), bbs) {
6317       bbs.AddressSpaceIndicator = ASI_PPGTT;
6318       bbs.SecondLevelBatchBuffer = Firstlevelbatch;
6319       bbs.BatchBufferStartAddress = secondary_addr;
6320    }
6321 
6322    /* Replace the return address written by the MI_STORE_DATA_IMM above with
6323     * the primary's current batch address (immediately after the jump).
6324     */
6325    mi_relocate_store_imm(reloc,
6326                          anv_address_physical(
6327                             anv_batch_current_address(batch)));
6328 }
6329 
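/* Emit the MI_BATCH_BUFFER_START used to jump back into the primary batch.
 * The start address is not programmed here; it is expected to be patched
 * later by the caller (see the relocated MI_STORE_DATA_IMM emitted in
 * genX(batch_emit_secondary_call)).
 */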
6330 void *
6331 genX(batch_emit_return)(struct anv_batch *batch)
6332 {
6333    return anv_batch_emitn(batch,
6334                           GENX(MI_BATCH_BUFFER_START_length),
6335                           GENX(MI_BATCH_BUFFER_START),
6336                           .AddressSpaceIndicator = ASI_PPGTT,
6337                           .SecondLevelBatchBuffer = Firstlevelbatch);
6338 }
6339 
6340 /* Wa_16018063123 */
6341 ALWAYS_INLINE void
6342 genX(batch_emit_fast_color_dummy_blit)(struct anv_batch *batch,
6343                                       struct anv_device *device)
6344 {
6345 #if GFX_VERx10 >= 125
6346    anv_batch_emit(batch, GENX(XY_FAST_COLOR_BLT), blt) {
6347       blt.DestinationBaseAddress = device->workaround_address;
6348       blt.DestinationMOCS = device->isl_dev.mocs.blitter_dst;
6349       blt.DestinationPitch = 63;
6350       blt.DestinationX2 = 1;
6351       blt.DestinationY2 = 4;
6352       blt.DestinationSurfaceWidth = 1;
6353       blt.DestinationSurfaceHeight = 4;
6354       blt.DestinationSurfaceType = XY_SURFTYPE_2D;
6355       blt.DestinationSurfaceQPitch = 4;
6356       blt.DestinationTiling = XY_TILE_LINEAR;
6357    }
6358 #endif
6359 }
6360 
6361 void
6362 genX(urb_workaround)(struct anv_cmd_buffer *cmd_buffer,
6363                      const struct intel_urb_config *urb_cfg)
6364 {
6365 #if INTEL_NEEDS_WA_16014912113
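   /* Wa_16014912113: when the tessellation URB allocation is about to
    * change, re-emit the previously programmed VS..GS URB configuration
    * (with 256 entries on the VS and none on the other stages) and flush
    * the HDC before the caller programs the new configuration.
    */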
6366    const struct intel_urb_config *current =
6367       &cmd_buffer->state.gfx.urb_cfg;
6368    if (intel_urb_setup_changed(urb_cfg, current, MESA_SHADER_TESS_EVAL) &&
6369        current->size[0] != 0) {
6370       for (int i = 0; i <= MESA_SHADER_GEOMETRY; i++) {
6371 #if GFX_VER >= 12
6372          anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_URB_ALLOC_VS), urb) {
6373             urb._3DCommandSubOpcode             += i;
6374             urb.VSURBEntryAllocationSize        = current->size[i] - 1;
6375             urb.VSURBStartingAddressSlice0      = current->start[i];
6376             urb.VSURBStartingAddressSliceN      = current->start[i];
6377             urb.VSNumberofURBEntriesSlice0      = i == 0 ? 256 : 0;
6378             urb.VSNumberofURBEntriesSliceN      = i == 0 ? 256 : 0;
6379          }
6380 #else
6381          anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_URB_VS), urb) {
6382             urb._3DCommandSubOpcode      += i;
6383             urb.VSURBStartingAddress      = current->start[i];
6384             urb.VSURBEntryAllocationSize  = current->size[i] - 1;
6385             urb.VSNumberofURBEntries      = i == 0 ? 256 : 0;
6386          }
6387 #endif
6388       }
6389       anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
6390          pc.HDCPipelineFlushEnable = true;
6391       }
6392    }
6393 #endif
6394 }
6395 
6396 struct anv_state
6397 genX(cmd_buffer_begin_companion_rcs_syncpoint)(
6398       struct anv_cmd_buffer   *cmd_buffer)
6399 {
6400 #if GFX_VERx10 >= 125
6401    const struct intel_device_info *info = cmd_buffer->device->info;
6402    struct anv_state syncpoint =
6403       anv_cmd_buffer_alloc_temporary_state(cmd_buffer, 2 * sizeof(uint32_t), 4);
6404    struct anv_address xcs_wait_addr =
6405       anv_cmd_buffer_temporary_state_address(cmd_buffer, syncpoint);
6406    struct anv_address rcs_wait_addr = anv_address_add(xcs_wait_addr, 4);
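   /* The syncpoint is two dwords of temporary state: dword 0 is polled by
    * the main (compute/blitter) batch, dword 1 (offset 4) by the companion
    * RCS batch.
    */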
6407 
6408    /* Reset the sync point */
6409    memset(syncpoint.map, 0, 2 * sizeof(uint32_t));
6410 
6411    struct mi_builder b;
6412 
6413    /* On CCS:
6414     *    - flush all caches & invalidate
6415     *    - unblock RCS
6416     *    - wait on RCS to complete
6417     *    - clear the value we waited on
6418     */
6419 
6420    if (anv_cmd_buffer_is_compute_queue(cmd_buffer)) {
6421       anv_add_pending_pipe_bits(cmd_buffer, ANV_PIPE_BARRIER_FLUSH_BITS |
6422                                             ANV_PIPE_INVALIDATE_BITS |
6423                                             ANV_PIPE_STALL_BITS,
6424                                 "post main cmd buffer invalidate");
6425       genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
6426    } else if (anv_cmd_buffer_is_blitter_queue(cmd_buffer)) {
6427       /* Wa_16018063123 - emit fast color dummy blit before MI_FLUSH_DW. */
6428       if (intel_needs_workaround(cmd_buffer->device->info, 16018063123)) {
6429          genX(batch_emit_fast_color_dummy_blit)(&cmd_buffer->batch,
6430                                                 cmd_buffer->device);
6431       }
6432       anv_batch_emit(&cmd_buffer->batch, GENX(MI_FLUSH_DW), fd) {
6433          fd.FlushCCS = true; /* Maybe handle Flush LLC */
6434       }
6435    }
6436 
6437    {
6438       mi_builder_init(&b, info, &cmd_buffer->batch);
6439       mi_store(&b, mi_mem32(rcs_wait_addr), mi_imm(0x1));
6440       anv_batch_emit(&cmd_buffer->batch, GENX(MI_SEMAPHORE_WAIT), sem) {
6441          sem.WaitMode            = PollingMode;
6442          sem.CompareOperation    = COMPARE_SAD_EQUAL_SDD;
6443          sem.SemaphoreDataDword  = 0x1;
6444          sem.SemaphoreAddress    = xcs_wait_addr;
6445       }
6446       /* Make sure to reset the semaphore in case the command buffer is run
6447        * multiple times.
6448        */
6449       mi_store(&b, mi_mem32(xcs_wait_addr), mi_imm(0x0));
6450    }
6451 
6452    /* On RCS:
6453     *    - wait on CCS signal
6454     *    - clear the value we waited on
6455     */
6456    {
6457       mi_builder_init(&b, info, &cmd_buffer->companion_rcs_cmd_buffer->batch);
6458       anv_batch_emit(&cmd_buffer->companion_rcs_cmd_buffer->batch,
6459                      GENX(MI_SEMAPHORE_WAIT),
6460                      sem) {
6461          sem.WaitMode            = PollingMode;
6462          sem.CompareOperation    = COMPARE_SAD_EQUAL_SDD;
6463          sem.SemaphoreDataDword  = 0x1;
6464          sem.SemaphoreAddress    = rcs_wait_addr;
6465       }
6466       /* Make sure to reset the semaphore in case the command buffer is run
6467        * multiple times.
6468        */
6469       mi_store(&b, mi_mem32(rcs_wait_addr), mi_imm(0x0));
6470    }
6471 
6472    return syncpoint;
6473 #else
6474    unreachable("Not implemented");
6475 #endif
6476 }
6477 
6478 void
6479 genX(cmd_buffer_end_companion_rcs_syncpoint)(struct anv_cmd_buffer *cmd_buffer,
6480                                              struct anv_state syncpoint)
6481 {
6482 #if GFX_VERx10 >= 125
6483    struct anv_address xcs_wait_addr =
6484       anv_cmd_buffer_temporary_state_address(cmd_buffer, syncpoint);
6485 
6486    struct mi_builder b;
6487 
6488    /* On RCS:
6489     *    - flush all caches & invalidate
6490     *    - unblock the CCS
6491     */
6492    anv_add_pending_pipe_bits(cmd_buffer->companion_rcs_cmd_buffer,
6493                              ANV_PIPE_BARRIER_FLUSH_BITS |
6494                              ANV_PIPE_INVALIDATE_BITS |
6495                              ANV_PIPE_STALL_BITS,
6496                              "post rcs flush");
6497    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer->companion_rcs_cmd_buffer);
6498 
6499    mi_builder_init(&b, cmd_buffer->device->info,
6500                    &cmd_buffer->companion_rcs_cmd_buffer->batch);
6501    mi_store(&b, mi_mem32(xcs_wait_addr), mi_imm(0x1));
6502 #else
6503    unreachable("Not implemented");
6504 #endif
6505 }
6506 
6507 void
6508 genX(write_trtt_entries)(struct anv_async_submit *submit,
6509                          struct anv_trtt_bind *l3l2_binds,
6510                          uint32_t n_l3l2_binds,
6511                          struct anv_trtt_bind *l1_binds,
6512                          uint32_t n_l1_binds)
6513 {
6514 #if GFX_VER >= 12
6515    const struct intel_device_info *devinfo =
6516       submit->queue->device->info;
6517    struct anv_batch *batch = &submit->batch;
6518 
6519    /* BSpec says:
6520     *   "DWord Length programmed must not exceed 0x3FE."
6521     * For a single dword write the programmed length is 2, and for a single
6522     * qword it's 3. These are the values actually written to the length
6523     * field, i.e. they do not include the length bias.
6524     */
6525    uint32_t dword_write_len = 2;
6526    uint32_t qword_write_len = 3;
6527    uint32_t max_dword_extra_writes = 0x3FE - dword_write_len;
6528    uint32_t max_qword_extra_writes = (0x3FE - qword_write_len) / 2;
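   /* With the 0x3FE cap this allows batching up to 0x3FE - 2 = 1020 extra
    * dword writes (1021 dwords total) or (0x3FE - 3) / 2 = 509 extra qword
    * writes (510 qwords total) into a single MI_STORE_DATA_IMM.
    */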
6529 
6530    /* What makes the code below quite complicated is the fact that we can
6531     * write multiple values with MI_STORE_DATA_IMM as long as the writes go to
6532     * contiguous addresses.
6533     */
6534 
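   /* For example, if three consecutive l3l2_binds target pte_addr,
    * pte_addr + 8 and pte_addr + 16, the loop below coalesces them into one
    * MI_STORE_DATA_IMM carrying three qwords.
    */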
6535    for (uint32_t i = 0; i < n_l3l2_binds; i++) {
6536       int extra_writes = 0;
6537       for (uint32_t j = i + 1;
6538            j < n_l3l2_binds && extra_writes <= max_qword_extra_writes;
6539            j++) {
6540          if (l3l2_binds[i].pte_addr + (j - i) * 8 == l3l2_binds[j].pte_addr) {
6541             extra_writes++;
6542          } else {
6543             break;
6544          }
6545       }
6546       bool is_last_write = n_l1_binds == 0 &&
6547                            i + extra_writes + 1 == n_l3l2_binds;
6548 
6549       uint32_t total_len = GENX(MI_STORE_DATA_IMM_length_bias) +
6550                            qword_write_len + (extra_writes * 2);
6551       uint32_t *dw;
6552       dw = anv_batch_emitn(batch, total_len, GENX(MI_STORE_DATA_IMM),
6553          .ForceWriteCompletionCheck = is_last_write,
6554          .StoreQword = true,
6555          .Address = anv_address_from_u64(l3l2_binds[i].pte_addr),
6556       );
6557       dw += 3;
6558       for (uint32_t j = 0; j < extra_writes + 1; j++) {
6559          uint64_t entry_addr_64b = l3l2_binds[i + j].entry_addr;
6560          *dw = entry_addr_64b & 0xFFFFFFFF;
6561          dw++;
6562          *dw = (entry_addr_64b >> 32) & 0xFFFFFFFF;
6563          dw++;
6564       }
6565       assert(dw == batch->next);
6566 
6567       i += extra_writes;
6568    }
6569 
6570    for (uint32_t i = 0; i < n_l1_binds; i++) {
6571       int extra_writes = 0;
6572       for (uint32_t j = i + 1;
6573            j < n_l1_binds && extra_writes <= max_dword_extra_writes;
6574            j++) {
6575          if (l1_binds[i].pte_addr + (j - i) * 4 ==
6576              l1_binds[j].pte_addr) {
6577             extra_writes++;
6578          } else {
6579             break;
6580          }
6581       }
6582 
6583       bool is_last_write = i + extra_writes + 1 == n_l1_binds;
6584 
6585       uint32_t total_len = GENX(MI_STORE_DATA_IMM_length_bias) +
6586                            dword_write_len + extra_writes;
6587       uint32_t *dw;
6588       dw = anv_batch_emitn(batch, total_len, GENX(MI_STORE_DATA_IMM),
6589          .ForceWriteCompletionCheck = is_last_write,
6590          .Address = anv_address_from_u64(l1_binds[i].pte_addr),
6591       );
6592       dw += 3;
6593       for (uint32_t j = 0; j < extra_writes + 1; j++) {
6594          *dw = (l1_binds[i + j].entry_addr >> 16) & 0xFFFFFFFF;
6595          dw++;
6596       }
6597       assert(dw == batch->next);
6598 
6599       i += extra_writes;
6600    }
6601 
6602    genx_batch_emit_pipe_control(batch, devinfo, _3D,
6603                                 ANV_PIPE_CS_STALL_BIT |
6604                                 ANV_PIPE_TLB_INVALIDATE_BIT);
6605 #else
6606    unreachable("Not implemented");
6607 #endif
6608 }
6609 
6610 void
6611 genX(async_submit_end)(struct anv_async_submit *submit)
6612 {
6613    struct anv_batch *batch = &submit->batch;
6614    anv_batch_emit(batch, GENX(MI_BATCH_BUFFER_END), bbe);
6615 }
6616 
6617 void
6618 genX(CmdWriteBufferMarker2AMD)(VkCommandBuffer commandBuffer,
6619                                VkPipelineStageFlags2 stage,
6620                                VkBuffer dstBuffer,
6621                                VkDeviceSize dstOffset,
6622                                uint32_t marker)
6623 {
6624    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
6625    ANV_FROM_HANDLE(anv_buffer, buffer, dstBuffer);
6626 
6627    /* The barriers inserted by the application to make dstBuffer writable
6628     * should already have the L1/L2 cache flushes. On platforms where the
6629     * command streamer is not coherent with L3, we need an additional set of
6630     * cache flushes.
6631     */
6632    enum anv_pipe_bits bits =
6633       (ANV_DEVINFO_HAS_COHERENT_L3_CS(cmd_buffer->device->info) ? 0 :
6634        (ANV_PIPE_DATA_CACHE_FLUSH_BIT | ANV_PIPE_TILE_CACHE_FLUSH_BIT)) |
6635       ANV_PIPE_END_OF_PIPE_SYNC_BIT;
6636 
6637    trace_intel_begin_write_buffer_marker(&cmd_buffer->trace);
6638 
6639    anv_add_pending_pipe_bits(cmd_buffer, bits, "write buffer marker");
6640    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
6641 
6642    struct mi_builder b;
6643    mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
6644 
6645    /* Emitting a PIPE_CONTROL with Post-Sync Op = Write Immediate Data
6646     * would be the logical way to implement this extension, as it could
6647     * do a pipelined marker write.  Unfortunately, it requires writing
6648     * whole 64-bit QWords, and VK_AMD_buffer_marker requires writing a
6649     * 32-bit value.  MI_STORE_DATA_IMM is the only good way to do that,
6650     * and unfortunately it requires stalling.
6651     */
6652    mi_store(&b, mi_mem32(anv_address_add(buffer->address, dstOffset)),
6653                 mi_imm(marker));
6654 
6655    trace_intel_end_write_buffer_marker(&cmd_buffer->trace);
6656 }
6657 
6658 void
6659 genX(cmd_write_buffer_cp)(struct anv_cmd_buffer *cmd_buffer,
6660                           VkDeviceAddress dstAddr,
6661                           void *data,
6662                           uint32_t size)
6663 {
6664    assert(size % 4 == 0);
6665    struct anv_address addr = anv_address_from_u64(dstAddr);
6666 
6667    struct mi_builder b;
6668    mi_builder_init(&b, cmd_buffer->device->info, &cmd_buffer->batch);
6669 
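   /* Copy the payload in 64-bit stores, falling back to a 32-bit store for a
    * trailing dword. Only the final store requests a write-completion check,
    * so the earlier stores can be pipelined.
    */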
6670    for (uint32_t i = 0; i < size; i += 8) {
6671       mi_builder_set_write_check(&b, i >= size - 8);
6672       if (size - i < 8) {
6673          mi_store(&b, mi_mem32(anv_address_add(addr, i)),
6674                       mi_imm(*((uint32_t *)((char*)data + i))));
6675       } else {
6676          mi_store(&b, mi_mem64(anv_address_add(addr, i)),
6677                       mi_imm(*((uint64_t *)((char*)data + i))));
6678       }
6679    }
6680 }
6681