1 /*
2  * Copyright © 2016 Red Hat.
3  * Copyright © 2016 Bas Nieuwenhuizen
4  *
5  * based in part on anv driver which is:
6  * Copyright © 2015 Intel Corporation
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the "Software"),
10  * to deal in the Software without restriction, including without limitation
11  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12  * and/or sell copies of the Software, and to permit persons to whom the
13  * Software is furnished to do so, subject to the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the next
16  * paragraph) shall be included in all copies or substantial portions of the
17  * Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
22  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25  * DEALINGS IN THE SOFTWARE.
26  */
27 
28 #include "tu_private.h"
29 
30 #include "adreno_pm4.xml.h"
31 #include "adreno_common.xml.h"
32 
33 #include "vk_format.h"
34 #include "vk_util.h"
35 
36 #include "tu_cs.h"
37 
38 void
39 tu6_emit_event_write(struct tu_cmd_buffer *cmd,
40                      struct tu_cs *cs,
41                      enum vgt_event_type event)
42 {
43    bool need_seqno = false;
44    switch (event) {
45    case CACHE_FLUSH_TS:
46    case WT_DONE_TS:
47    case RB_DONE_TS:
48    case PC_CCU_FLUSH_DEPTH_TS:
49    case PC_CCU_FLUSH_COLOR_TS:
50    case PC_CCU_RESOLVE_TS:
51       need_seqno = true;
52       break;
53    default:
54       break;
55    }
56 
57    tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, need_seqno ? 4 : 1);
58    tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(event));
59    if (need_seqno) {
60       tu_cs_emit_qw(cs, global_iova(cmd, seqno_dummy));
61       tu_cs_emit(cs, 0);
62    }
63 }
64 
65 static void
66 tu6_emit_flushes(struct tu_cmd_buffer *cmd_buffer,
67                  struct tu_cs *cs,
68                  enum tu_cmd_flush_bits flushes)
69 {
70    /* Experiments show that invalidating CCU while it still has data in it
71     * doesn't work, so make sure to always flush before invalidating in case
72     * any data remains that hasn't yet been made available through a barrier.
73     * However, it does seem to work for UCHE.
74     */
75    if (flushes & (TU_CMD_FLAG_CCU_FLUSH_COLOR |
76                   TU_CMD_FLAG_CCU_INVALIDATE_COLOR))
77       tu6_emit_event_write(cmd_buffer, cs, PC_CCU_FLUSH_COLOR_TS);
78    if (flushes & (TU_CMD_FLAG_CCU_FLUSH_DEPTH |
79                   TU_CMD_FLAG_CCU_INVALIDATE_DEPTH))
80       tu6_emit_event_write(cmd_buffer, cs, PC_CCU_FLUSH_DEPTH_TS);
81    if (flushes & TU_CMD_FLAG_CCU_INVALIDATE_COLOR)
82       tu6_emit_event_write(cmd_buffer, cs, PC_CCU_INVALIDATE_COLOR);
83    if (flushes & TU_CMD_FLAG_CCU_INVALIDATE_DEPTH)
84       tu6_emit_event_write(cmd_buffer, cs, PC_CCU_INVALIDATE_DEPTH);
85    if (flushes & TU_CMD_FLAG_CACHE_FLUSH)
86       tu6_emit_event_write(cmd_buffer, cs, CACHE_FLUSH_TS);
87    if (flushes & TU_CMD_FLAG_CACHE_INVALIDATE)
88       tu6_emit_event_write(cmd_buffer, cs, CACHE_INVALIDATE);
89    if (flushes & TU_CMD_FLAG_WAIT_MEM_WRITES)
90       tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
91    if (flushes & TU_CMD_FLAG_WAIT_FOR_IDLE)
92       tu_cs_emit_wfi(cs);
93    if (flushes & TU_CMD_FLAG_WAIT_FOR_ME)
94       tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
95 }
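
/* Illustrative sketch (not authoritative): how a barrier-style caller might
 * rely on tu6_emit_flushes() above to order the CCU flush before the matching
 * invalidate. The helper name and the flag combination are hypothetical; the
 * real driver derives flush_bits from Vulkan access masks elsewhere.
 */
static void
example_color_write_then_sample(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   enum tu_cmd_flush_bits flushes = 0;

   /* writes went through the color CCU, so flush it out... */
   flushes |= TU_CMD_FLAG_CCU_FLUSH_COLOR;
   /* ...and invalidate the read-side caches afterwards */
   flushes |= TU_CMD_FLAG_CCU_INVALIDATE_COLOR | TU_CMD_FLAG_CACHE_INVALIDATE;

   /* tu6_emit_flushes() emits PC_CCU_FLUSH_COLOR_TS before
    * PC_CCU_INVALIDATE_COLOR, which is exactly the ordering the comment
    * above requires.
    */
   tu6_emit_flushes(cmd, cs, flushes);
}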
96 
97 /* "Normal" cache flushes, that don't require any special handling */
98 
99 static void
100 tu_emit_cache_flush(struct tu_cmd_buffer *cmd_buffer,
101                     struct tu_cs *cs)
102 {
103    tu6_emit_flushes(cmd_buffer, cs, cmd_buffer->state.cache.flush_bits);
104    cmd_buffer->state.cache.flush_bits = 0;
105 }
106 
107 /* Renderpass cache flushes */
108 
109 void
110 tu_emit_cache_flush_renderpass(struct tu_cmd_buffer *cmd_buffer,
111                                struct tu_cs *cs)
112 {
113    tu6_emit_flushes(cmd_buffer, cs, cmd_buffer->state.renderpass_cache.flush_bits);
114    cmd_buffer->state.renderpass_cache.flush_bits = 0;
115 }
116 
117 /* Cache flushes for things that use the color/depth read/write path (i.e.
118  * blits and draws). This deals with changing CCU state as well as the usual
119  * cache flushing.
120  */
121 
122 void
123 tu_emit_cache_flush_ccu(struct tu_cmd_buffer *cmd_buffer,
124                         struct tu_cs *cs,
125                         enum tu_cmd_ccu_state ccu_state)
126 {
127    enum tu_cmd_flush_bits flushes = cmd_buffer->state.cache.flush_bits;
128 
129    assert(ccu_state != TU_CMD_CCU_UNKNOWN);
130 
131    /* Changing CCU state must involve invalidating the CCU. In sysmem mode,
132     * the CCU may also contain data that we haven't flushed out yet, so we
133     * also need to flush. Also, in order to program RB_CCU_CNTL, we need to
134     * emit a WFI as it isn't pipelined.
135     */
136    if (ccu_state != cmd_buffer->state.ccu_state) {
137       if (cmd_buffer->state.ccu_state != TU_CMD_CCU_GMEM) {
138          flushes |=
139             TU_CMD_FLAG_CCU_FLUSH_COLOR |
140             TU_CMD_FLAG_CCU_FLUSH_DEPTH;
141          cmd_buffer->state.cache.pending_flush_bits &= ~(
142             TU_CMD_FLAG_CCU_FLUSH_COLOR |
143             TU_CMD_FLAG_CCU_FLUSH_DEPTH);
144       }
145       flushes |=
146          TU_CMD_FLAG_CCU_INVALIDATE_COLOR |
147          TU_CMD_FLAG_CCU_INVALIDATE_DEPTH |
148          TU_CMD_FLAG_WAIT_FOR_IDLE;
149       cmd_buffer->state.cache.pending_flush_bits &= ~(
150          TU_CMD_FLAG_CCU_INVALIDATE_COLOR |
151          TU_CMD_FLAG_CCU_INVALIDATE_DEPTH |
152          TU_CMD_FLAG_WAIT_FOR_IDLE);
153    }
154 
155    tu6_emit_flushes(cmd_buffer, cs, flushes);
156    cmd_buffer->state.cache.flush_bits = 0;
157 
158    if (ccu_state != cmd_buffer->state.ccu_state) {
159       struct tu_physical_device *phys_dev = cmd_buffer->device->physical_device;
160       tu_cs_emit_regs(cs,
161                       A6XX_RB_CCU_CNTL(.offset =
162                                           ccu_state == TU_CMD_CCU_GMEM ?
163                                           phys_dev->info.a6xx.ccu_offset_gmem :
164                                           phys_dev->info.a6xx.ccu_offset_bypass,
165                                        .gmem = ccu_state == TU_CMD_CCU_GMEM));
166       cmd_buffer->state.ccu_state = ccu_state;
167    }
168 }
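
/* Illustrative sketch (hypothetical helper, not from the driver): callers
 * that are about to write color/depth through the 2D or 3D path first move
 * the CCU to the right state with tu_emit_cache_flush_ccu() above. Compare
 * the real calls in tu6_sysmem_render_begin() and tu6_tile_render_begin()
 * further down in this file.
 */
static void
example_prepare_sysmem_draw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   /* emits any pending flushes, invalidates the CCU, programs RB_CCU_CNTL
    * with the bypass offset, and records the new state so a redundant
    * transition is skipped next time
    */
   tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_SYSMEM);
}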
169 
170 static void
171 tu6_emit_zs(struct tu_cmd_buffer *cmd,
172             const struct tu_subpass *subpass,
173             struct tu_cs *cs)
174 {
175    const struct tu_framebuffer *fb = cmd->state.framebuffer;
176 
177    const uint32_t a = subpass->depth_stencil_attachment.attachment;
178    if (a == VK_ATTACHMENT_UNUSED) {
179       tu_cs_emit_regs(cs,
180                       A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE),
181                       A6XX_RB_DEPTH_BUFFER_PITCH(0),
182                       A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(0),
183                       A6XX_RB_DEPTH_BUFFER_BASE(0),
184                       A6XX_RB_DEPTH_BUFFER_BASE_GMEM(0));
185 
186       tu_cs_emit_regs(cs,
187                       A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE));
188 
189       tu_cs_emit_regs(cs,
190                       A6XX_GRAS_LRZ_BUFFER_BASE(0),
191                       A6XX_GRAS_LRZ_BUFFER_PITCH(0),
192                       A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE(0));
193 
194       tu_cs_emit_regs(cs, A6XX_RB_STENCIL_INFO(0));
195 
196       return;
197    }
198 
199    const struct tu_image_view *iview = fb->attachments[a].attachment;
200    const struct tu_render_pass_attachment *attachment =
201       &cmd->state.pass->attachments[a];
202    enum a6xx_depth_format fmt = tu6_pipe2depth(attachment->format);
203 
204    tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
205    tu_cs_emit(cs, A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = fmt).value);
206    tu_cs_image_ref(cs, iview, 0);
207    tu_cs_emit(cs, attachment->gmem_offset);
208 
209    tu_cs_emit_regs(cs,
210                    A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = fmt));
211 
212    tu_cs_emit_pkt4(cs, REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_LO, 3);
213    tu_cs_image_flag_ref(cs, iview, 0);
214 
215    tu_cs_emit_regs(cs, A6XX_GRAS_LRZ_BUFFER_BASE(.bo = iview->image->bo,
216                                                  .bo_offset = iview->image->bo_offset + iview->image->lrz_offset),
217                    A6XX_GRAS_LRZ_BUFFER_PITCH(.pitch = iview->image->lrz_pitch),
218                    A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO(0),
219                    A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI(0));
220 
221    if (attachment->format == VK_FORMAT_D32_SFLOAT_S8_UINT ||
222        attachment->format == VK_FORMAT_S8_UINT) {
223 
224       tu_cs_emit_pkt4(cs, REG_A6XX_RB_STENCIL_INFO, 6);
225       tu_cs_emit(cs, A6XX_RB_STENCIL_INFO(.separate_stencil = true).value);
226       if (attachment->format == VK_FORMAT_D32_SFLOAT_S8_UINT) {
227          tu_cs_image_stencil_ref(cs, iview, 0);
228          tu_cs_emit(cs, attachment->gmem_offset_stencil);
229       } else {
230          tu_cs_image_ref(cs, iview, 0);
231          tu_cs_emit(cs, attachment->gmem_offset);
232       }
233    } else {
234       tu_cs_emit_regs(cs,
235                      A6XX_RB_STENCIL_INFO(0));
236    }
237 }
238 
239 static void
240 tu6_emit_mrt(struct tu_cmd_buffer *cmd,
241              const struct tu_subpass *subpass,
242              struct tu_cs *cs)
243 {
244    const struct tu_framebuffer *fb = cmd->state.framebuffer;
245 
246    for (uint32_t i = 0; i < subpass->color_count; ++i) {
247       uint32_t a = subpass->color_attachments[i].attachment;
248       if (a == VK_ATTACHMENT_UNUSED)
249          continue;
250 
251       const struct tu_image_view *iview = fb->attachments[a].attachment;
252 
253       tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_BUF_INFO(i), 6);
254       tu_cs_emit(cs, iview->RB_MRT_BUF_INFO);
255       tu_cs_image_ref(cs, iview, 0);
256       tu_cs_emit(cs, cmd->state.pass->attachments[a].gmem_offset);
257 
258       tu_cs_emit_regs(cs,
259                       A6XX_SP_FS_MRT_REG(i, .dword = iview->SP_FS_MRT_REG));
260 
261       tu_cs_emit_pkt4(cs, REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR_LO(i), 3);
262       tu_cs_image_flag_ref(cs, iview, 0);
263    }
264 
265    tu_cs_emit_regs(cs,
266                    A6XX_RB_SRGB_CNTL(.dword = subpass->srgb_cntl));
267    tu_cs_emit_regs(cs,
268                    A6XX_SP_SRGB_CNTL(.dword = subpass->srgb_cntl));
269 
270    unsigned layers = MAX2(fb->layers, util_logbase2(subpass->multiview_mask) + 1);
271    tu_cs_emit_regs(cs, A6XX_GRAS_MAX_LAYER_INDEX(layers - 1));
272 }
273 
274 void
275 tu6_emit_msaa(struct tu_cs *cs, VkSampleCountFlagBits vk_samples)
276 {
277    const enum a3xx_msaa_samples samples = tu_msaa_samples(vk_samples);
278    bool msaa_disable = samples == MSAA_ONE;
279 
280    tu_cs_emit_regs(cs,
281                    A6XX_SP_TP_RAS_MSAA_CNTL(samples),
282                    A6XX_SP_TP_DEST_MSAA_CNTL(.samples = samples,
283                                              .msaa_disable = msaa_disable));
284 
285    tu_cs_emit_regs(cs,
286                    A6XX_GRAS_RAS_MSAA_CNTL(samples),
287                    A6XX_GRAS_DEST_MSAA_CNTL(.samples = samples,
288                                             .msaa_disable = msaa_disable));
289 
290    tu_cs_emit_regs(cs,
291                    A6XX_RB_RAS_MSAA_CNTL(samples),
292                    A6XX_RB_DEST_MSAA_CNTL(.samples = samples,
293                                           .msaa_disable = msaa_disable));
294 
295    tu_cs_emit_regs(cs,
296                    A6XX_RB_MSAA_CNTL(samples));
297 }
298 
299 static void
300 tu6_emit_bin_size(struct tu_cs *cs,
301                   uint32_t bin_w, uint32_t bin_h, uint32_t flags)
302 {
303    tu_cs_emit_regs(cs,
304                    A6XX_GRAS_BIN_CONTROL(.binw = bin_w,
305                                          .binh = bin_h,
306                                          .dword = flags));
307 
308    tu_cs_emit_regs(cs,
309                    A6XX_RB_BIN_CONTROL(.binw = bin_w,
310                                        .binh = bin_h,
311                                        .dword = flags));
312 
313    /* no flag for RB_BIN_CONTROL2... */
314    tu_cs_emit_regs(cs,
315                    A6XX_RB_BIN_CONTROL2(.binw = bin_w,
316                                         .binh = bin_h));
317 }
318 
319 static void
320 tu6_emit_render_cntl(struct tu_cmd_buffer *cmd,
321                      const struct tu_subpass *subpass,
322                      struct tu_cs *cs,
323                      bool binning)
324 {
325    const struct tu_framebuffer *fb = cmd->state.framebuffer;
326    uint32_t cntl = 0;
327    cntl |= A6XX_RB_RENDER_CNTL_UNK4;
328    if (binning) {
329       cntl |= A6XX_RB_RENDER_CNTL_BINNING;
330    } else {
331       uint32_t mrts_ubwc_enable = 0;
332       for (uint32_t i = 0; i < subpass->color_count; ++i) {
333          uint32_t a = subpass->color_attachments[i].attachment;
334          if (a == VK_ATTACHMENT_UNUSED)
335             continue;
336 
337          const struct tu_image_view *iview = fb->attachments[a].attachment;
338          if (iview->ubwc_enabled)
339             mrts_ubwc_enable |= 1 << i;
340       }
341 
342       cntl |= A6XX_RB_RENDER_CNTL_FLAG_MRTS(mrts_ubwc_enable);
343 
344       const uint32_t a = subpass->depth_stencil_attachment.attachment;
345       if (a != VK_ATTACHMENT_UNUSED) {
346          const struct tu_image_view *iview = fb->attachments[a].attachment;
347          if (iview->ubwc_enabled)
348             cntl |= A6XX_RB_RENDER_CNTL_FLAG_DEPTH;
349       }
350 
351       /* In the !binning case, we need to set RB_RENDER_CNTL in the draw_cs
352        * in order to set it correctly for the different subpasses. However,
353        * that means the packets we're emitting also happen during binning. So
354        * we need to guard the write on !BINNING at CP execution time.
355        */
356       tu_cs_reserve(cs, 3 + 4);
357       tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
358       tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
359                      CP_COND_REG_EXEC_0_GMEM | CP_COND_REG_EXEC_0_SYSMEM);
360       tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(4));
361    }
362 
363    tu_cs_emit_pkt7(cs, CP_REG_WRITE, 3);
364    tu_cs_emit(cs, CP_REG_WRITE_0_TRACKER(TRACK_RENDER_CNTL));
365    tu_cs_emit(cs, REG_A6XX_RB_RENDER_CNTL);
366    tu_cs_emit(cs, cntl);
367 }
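
/* Illustrative sketch (hypothetical helper): the general shape of the
 * CP_COND_REG_EXEC guard used above. `payload_dwords` counts every dword
 * that follows, packet headers included, so the CP knows how much to skip
 * when the current render mode doesn't match.
 */
static void
example_begin_draw_time_only_section(struct tu_cs *cs, uint32_t payload_dwords)
{
   /* reserve the guard and its payload together so they stay contiguous */
   tu_cs_reserve(cs, 3 + payload_dwords);

   tu_cs_emit_pkt7(cs, CP_COND_REG_EXEC, 2);
   tu_cs_emit(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
                  CP_COND_REG_EXEC_0_GMEM | CP_COND_REG_EXEC_0_SYSMEM);
   tu_cs_emit(cs, CP_COND_REG_EXEC_1_DWORDS(payload_dwords));

   /* the caller must now emit exactly `payload_dwords` dwords */
}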
368 
369 static void
370 tu6_emit_blit_scissor(struct tu_cmd_buffer *cmd, struct tu_cs *cs, bool align)
371 {
372    struct tu_physical_device *phys_dev = cmd->device->physical_device;
373    const VkRect2D *render_area = &cmd->state.render_area;
374 
375    /* Avoid assertion failures with an empty render area at (0, 0) where the
376     * subtraction below wraps around. Empty render areas should be forced to
377     * the sysmem path by use_sysmem_rendering(). It's not even clear whether
378     * an empty scissor here works, and the blob seems to force sysmem too as
379     * it sets something wrong (non-empty) for the scissor.
380     */
381    if (render_area->extent.width == 0 ||
382        render_area->extent.height == 0)
383       return;
384 
385    uint32_t x1 = render_area->offset.x;
386    uint32_t y1 = render_area->offset.y;
387    uint32_t x2 = x1 + render_area->extent.width - 1;
388    uint32_t y2 = y1 + render_area->extent.height - 1;
389 
390    if (align) {
391       x1 = x1 & ~(phys_dev->info.gmem_align_w - 1);
392       y1 = y1 & ~(phys_dev->info.gmem_align_h - 1);
393       x2 = ALIGN_POT(x2 + 1, phys_dev->info.gmem_align_w) - 1;
394       y2 = ALIGN_POT(y2 + 1, phys_dev->info.gmem_align_h) - 1;
395    }
396 
397    tu_cs_emit_regs(cs,
398                    A6XX_RB_BLIT_SCISSOR_TL(.x = x1, .y = y1),
399                    A6XX_RB_BLIT_SCISSOR_BR(.x = x2, .y = y2));
400 }
401 
402 void
403 tu6_emit_window_scissor(struct tu_cs *cs,
404                         uint32_t x1,
405                         uint32_t y1,
406                         uint32_t x2,
407                         uint32_t y2)
408 {
409    tu_cs_emit_regs(cs,
410                    A6XX_GRAS_SC_WINDOW_SCISSOR_TL(.x = x1, .y = y1),
411                    A6XX_GRAS_SC_WINDOW_SCISSOR_BR(.x = x2, .y = y2));
412 
413    tu_cs_emit_regs(cs,
414                    A6XX_GRAS_2D_RESOLVE_CNTL_1(.x = x1, .y = y1),
415                    A6XX_GRAS_2D_RESOLVE_CNTL_2(.x = x2, .y = y2));
416 }
417 
418 void
419 tu6_emit_window_offset(struct tu_cs *cs, uint32_t x1, uint32_t y1)
420 {
421    tu_cs_emit_regs(cs,
422                    A6XX_RB_WINDOW_OFFSET(.x = x1, .y = y1));
423 
424    tu_cs_emit_regs(cs,
425                    A6XX_RB_WINDOW_OFFSET2(.x = x1, .y = y1));
426 
427    tu_cs_emit_regs(cs,
428                    A6XX_SP_WINDOW_OFFSET(.x = x1, .y = y1));
429 
430    tu_cs_emit_regs(cs,
431                    A6XX_SP_TP_WINDOW_OFFSET(.x = x1, .y = y1));
432 }
433 
434 static void
435 tu_cs_emit_draw_state(struct tu_cs *cs, uint32_t id, struct tu_draw_state state)
436 {
437    uint32_t enable_mask;
438    switch (id) {
439    case TU_DRAW_STATE_PROGRAM:
440    case TU_DRAW_STATE_VI:
441    case TU_DRAW_STATE_FS_CONST:
442    /* The blob seems to not enable this (DESC_SETS_LOAD) for binning, even
443     * when resources would actually be used in the binning shader.
444     * Presumably the overhead of prefetching the resources isn't
445     * worth it.
446     */
447    case TU_DRAW_STATE_DESC_SETS_LOAD:
448       enable_mask = CP_SET_DRAW_STATE__0_GMEM |
449                     CP_SET_DRAW_STATE__0_SYSMEM;
450       break;
451    case TU_DRAW_STATE_PROGRAM_BINNING:
452    case TU_DRAW_STATE_VI_BINNING:
453       enable_mask = CP_SET_DRAW_STATE__0_BINNING;
454       break;
455    case TU_DRAW_STATE_INPUT_ATTACHMENTS_GMEM:
456       enable_mask = CP_SET_DRAW_STATE__0_GMEM;
457       break;
458    case TU_DRAW_STATE_INPUT_ATTACHMENTS_SYSMEM:
459       enable_mask = CP_SET_DRAW_STATE__0_SYSMEM;
460       break;
461    default:
462       enable_mask = CP_SET_DRAW_STATE__0_GMEM |
463                     CP_SET_DRAW_STATE__0_SYSMEM |
464                     CP_SET_DRAW_STATE__0_BINNING;
465       break;
466    }
467 
468    STATIC_ASSERT(TU_DRAW_STATE_COUNT <= 32);
469 
470    /* We need to reload the descriptors every time the descriptor sets
471     * change. However, the commands we send only depend on the pipeline
472     * because the whole point is to cache descriptors which are used by the
473     * pipeline. There's a problem here, in that the firmware has an
474     * "optimization" which skips executing groups that are set to the same
475     * value as the last draw. This means that if the descriptor sets change
476     * but not the pipeline, we'd try to re-execute the same buffer which
477     * the firmware would ignore and we wouldn't pre-load the new
478     * descriptors. Set the DIRTY bit to avoid this optimization
479     */
480    if (id == TU_DRAW_STATE_DESC_SETS_LOAD)
481       enable_mask |= CP_SET_DRAW_STATE__0_DIRTY;
482 
483    tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(state.size) |
484                   enable_mask |
485                   CP_SET_DRAW_STATE__0_GROUP_ID(id) |
486                   COND(!state.size, CP_SET_DRAW_STATE__0_DISABLE));
487    tu_cs_emit_qw(cs, state.iova);
488 }
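
/* Illustrative sketch (hypothetical helper): tu_cs_emit_draw_state() only
 * writes the 3-dword group body, so the caller first emits a
 * CP_SET_DRAW_STATE header sized for all groups (3 dwords per group).
 * tu_set_input_attachments() further down is a real instance of this pattern.
 */
static void
example_emit_two_draw_state_groups(struct tu_cs *cs,
                                   struct tu_draw_state gmem_state,
                                   struct tu_draw_state sysmem_state)
{
   tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 6); /* 2 groups * 3 dwords */
   tu_cs_emit_draw_state(cs, TU_DRAW_STATE_INPUT_ATTACHMENTS_GMEM, gmem_state);
   tu_cs_emit_draw_state(cs, TU_DRAW_STATE_INPUT_ATTACHMENTS_SYSMEM, sysmem_state);
}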
489 
490 static bool
491 use_hw_binning(struct tu_cmd_buffer *cmd)
492 {
493    const struct tu_framebuffer *fb = cmd->state.framebuffer;
494 
495    /* XFB commands are emitted for BINNING || SYSMEM, which makes XFB
496     * incompatible with non-hw-binning GMEM rendering. This is required
497     * because some of the XFB commands need to be executed only once.
498     */
499    if (cmd->state.xfb_used)
500       return true;
501 
502    /* Some devices have a newer a630_sqe.fw in which, only in CP_DRAW_INDX and
503     * CP_DRAW_INDX_OFFSET, visibility-based skipping happens *before*
504     * predication-based skipping. It seems this breaks predication, because
505     * draws skipped by predication will not be executed in the binning phase,
506     * and therefore won't have an entry in the draw stream, but the
507     * visibility-based skipping will expect it to have an entry. The result is
508     * a GPU hang when actually executing the first non-predicated draw.
509     * However, it seems that things still work if the whole renderpass is
510     * predicated. Affected tests are
511     * dEQP-VK.conditional_rendering.draw_clear.draw.case_2 as well as a few
512     * other case_N.
513     *
514     * Broken FW version: 016ee181
515     * linux-firmware (working) FW version: 016ee176
516     *
517     * All known a650_sqe.fw versions don't have this bug.
518     *
519     * TODO: we should do version detection of the FW so that devices using the
520     * linux-firmware version of a630_sqe.fw don't need this workaround.
521     */
522    if (cmd->state.has_subpass_predication && cmd->device->physical_device->gpu_id != 650)
523       return false;
524 
525    if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_NOBIN))
526       return false;
527 
528    if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_FORCEBIN))
529       return true;
530 
531    return (fb->tile_count.width * fb->tile_count.height) > 2;
532 }
533 
534 static bool
535 use_sysmem_rendering(struct tu_cmd_buffer *cmd)
536 {
537    if (unlikely(cmd->device->physical_device->instance->debug_flags & TU_DEBUG_SYSMEM))
538       return true;
539 
540    /* If hw binning is required because of XFB but doesn't work because of the
541     * conditional rendering bug, fall back to sysmem.
542     */
543    if (cmd->state.xfb_used && cmd->state.has_subpass_predication &&
544        cmd->device->physical_device->gpu_id != 650)
545       return true;
546 
547    /* can't fit attachments into gmem */
548    if (!cmd->state.pass->gmem_pixels)
549       return true;
550 
551    if (cmd->state.framebuffer->layers > 1)
552       return true;
553 
554    /* Use sysmem for empty render areas */
555    if (cmd->state.render_area.extent.width == 0 ||
556        cmd->state.render_area.extent.height == 0)
557       return true;
558 
559    if (cmd->state.has_tess)
560       return true;
561 
562    return false;
563 }
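
/* Illustrative sketch (hypothetical dispatch, assuming the forward
 * declarations below match the static definitions later in this file): how
 * the two predicates above are meant to be combined once command-buffer
 * recording reaches the end of the render pass.
 */
static void tu_cmd_render_sysmem(struct tu_cmd_buffer *cmd);
static void tu_cmd_render_tiles(struct tu_cmd_buffer *cmd);

static void
example_dispatch_renderpass(struct tu_cmd_buffer *cmd)
{
   if (use_sysmem_rendering(cmd))
      tu_cmd_render_sysmem(cmd); /* direct rendering, no tiling */
   else
      tu_cmd_render_tiles(cmd);  /* GMEM path, with or without HW binning */
}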
564 
565 static void
566 tu6_emit_tile_select(struct tu_cmd_buffer *cmd,
567                      struct tu_cs *cs,
568                      uint32_t tx, uint32_t ty, uint32_t pipe, uint32_t slot)
569 {
570    const struct tu_framebuffer *fb = cmd->state.framebuffer;
571 
572    tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
573    tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM));
574 
575    const uint32_t x1 = fb->tile0.width * tx;
576    const uint32_t y1 = fb->tile0.height * ty;
577    const uint32_t x2 = x1 + fb->tile0.width - 1;
578    const uint32_t y2 = y1 + fb->tile0.height - 1;
579    tu6_emit_window_scissor(cs, x1, y1, x2, y2);
580    tu6_emit_window_offset(cs, x1, y1);
581 
582    tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(false));
583 
584    if (use_hw_binning(cmd)) {
585       tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
586 
587       tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
588       tu_cs_emit(cs, 0x0);
589 
590       tu_cs_emit_pkt7(cs, CP_SET_BIN_DATA5_OFFSET, 4);
591       tu_cs_emit(cs, fb->pipe_sizes[pipe] |
592                      CP_SET_BIN_DATA5_0_VSC_N(slot));
593       tu_cs_emit(cs, pipe * cmd->vsc_draw_strm_pitch);
594       tu_cs_emit(cs, pipe * 4);
595       tu_cs_emit(cs, pipe * cmd->vsc_prim_strm_pitch);
596 
597       tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
598       tu_cs_emit(cs, 0x0);
599 
600       tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
601       tu_cs_emit(cs, 0x0);
602    } else {
603       tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
604       tu_cs_emit(cs, 0x1);
605 
606       tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
607       tu_cs_emit(cs, 0x0);
608    }
609 }
610 
611 static void
612 tu6_emit_sysmem_resolve(struct tu_cmd_buffer *cmd,
613                         struct tu_cs *cs,
614                         uint32_t layer_mask,
615                         uint32_t a,
616                         uint32_t gmem_a)
617 {
618    const struct tu_framebuffer *fb = cmd->state.framebuffer;
619    struct tu_image_view *dst = fb->attachments[a].attachment;
620    struct tu_image_view *src = fb->attachments[gmem_a].attachment;
621 
622    tu_resolve_sysmem(cmd, cs, src, dst, layer_mask, fb->layers, &cmd->state.render_area);
623 }
624 
625 static void
626 tu6_emit_sysmem_resolves(struct tu_cmd_buffer *cmd,
627                          struct tu_cs *cs,
628                          const struct tu_subpass *subpass)
629 {
630    if (subpass->resolve_attachments) {
631       /* From the documentation for vkCmdNextSubpass, section 7.4 "Render Pass
632        * Commands":
633        *
634        *    End-of-subpass multisample resolves are treated as color
635        *    attachment writes for the purposes of synchronization. That is,
636        *    they are considered to execute in the
637        *    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT pipeline stage and
638        *    their writes are synchronized with
639        *    VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT. Synchronization between
640        *    rendering within a subpass and any resolve operations at the end
641        *    of the subpass occurs automatically, without need for explicit
642        *    dependencies or pipeline barriers. However, if the resolve
643        *    attachment is also used in a different subpass, an explicit
644        *    dependency is needed.
645        *
646        * We use the CP_BLIT path for sysmem resolves, which is really a
647        * transfer command, so we have to manually flush similar to the gmem
648        * resolve case. However, a flush afterwards isn't needed because of the
649        * last sentence and the fact that we're in sysmem mode.
650        */
651       tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS);
652       tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
653 
654       /* Wait for the flushes to land before using the 2D engine */
655       tu_cs_emit_wfi(cs);
656 
657       for (unsigned i = 0; i < subpass->color_count; i++) {
658          uint32_t a = subpass->resolve_attachments[i].attachment;
659          if (a == VK_ATTACHMENT_UNUSED)
660             continue;
661 
662          tu6_emit_sysmem_resolve(cmd, cs, subpass->multiview_mask, a,
663                                  subpass->color_attachments[i].attachment);
664       }
665    }
666 }
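
/* Illustrative sketch (hypothetical helper): the flush/invalidate/wait
 * sequence used above, isolated as a pattern. Whenever the CP_BLIT-based 2D
 * path consumes data produced through the color CCU, the CCU contents must be
 * written back and the read-side caches refreshed before the blit runs.
 */
static void
example_sync_before_2d_blit(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu6_emit_event_write(cmd, cs, PC_CCU_FLUSH_COLOR_TS); /* write back CCU */
   tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);      /* refresh UCHE   */
   tu_cs_emit_wfi(cs);                                   /* wait for both  */
}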
667 
668 static void
669 tu6_emit_tile_store(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
670 {
671    const struct tu_render_pass *pass = cmd->state.pass;
672    const struct tu_subpass *subpass = &pass->subpasses[pass->subpass_count-1];
673 
674    tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
675    tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
676                      CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
677                      CP_SET_DRAW_STATE__0_GROUP_ID(0));
678    tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
679    tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));
680 
681    tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
682    tu_cs_emit(cs, 0x0);
683 
684    tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
685    tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE));
686 
687    tu6_emit_blit_scissor(cmd, cs, true);
688 
689    for (uint32_t a = 0; a < pass->attachment_count; ++a) {
690       if (pass->attachments[a].gmem_offset >= 0)
691          tu_store_gmem_attachment(cmd, cs, a, a);
692    }
693 
694    if (subpass->resolve_attachments) {
695       for (unsigned i = 0; i < subpass->color_count; i++) {
696          uint32_t a = subpass->resolve_attachments[i].attachment;
697          if (a != VK_ATTACHMENT_UNUSED)
698             tu_store_gmem_attachment(cmd, cs, a,
699                                      subpass->color_attachments[i].attachment);
700       }
701    }
702 }
703 
704 static void
705 tu6_init_hw(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
706 {
707    struct tu_device *dev = cmd->device;
708    const struct tu_physical_device *phys_dev = dev->physical_device;
709 
710    tu6_emit_event_write(cmd, cs, CACHE_INVALIDATE);
711 
712    tu_cs_emit_regs(cs, A6XX_HLSQ_INVALIDATE_CMD(
713          .vs_state = true,
714          .hs_state = true,
715          .ds_state = true,
716          .gs_state = true,
717          .fs_state = true,
718          .cs_state = true,
719          .gfx_ibo = true,
720          .cs_ibo = true,
721          .gfx_shared_const = true,
722          .cs_shared_const = true,
723          .gfx_bindless = 0x1f,
724          .cs_bindless = 0x1f));
725 
726    tu_cs_emit_wfi(cs);
727 
728    cmd->state.cache.pending_flush_bits &=
729       ~(TU_CMD_FLAG_WAIT_FOR_IDLE | TU_CMD_FLAG_CACHE_INVALIDATE);
730 
731    tu_cs_emit_regs(cs,
732                    A6XX_RB_CCU_CNTL(.offset = phys_dev->info.a6xx.ccu_offset_bypass));
733    cmd->state.ccu_state = TU_CMD_CCU_SYSMEM;
734    tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E04, 0x00100000);
735    tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE04, 0x8);
736    tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE00, 0);
737    tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE0F, 0x3f);
738    tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B605, 0x44);
739    tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B600, 0x100000);
740    tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE00, 0x80);
741    tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE01, 0);
742 
743    tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9600, 0);
744    tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8600, 0x880);
745    tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_UNKNOWN_BE04, 0);
746    tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_AE03, 0x00000410);
747    tu_cs_emit_write_reg(cs, REG_A6XX_SP_IBO_COUNT, 0);
748    tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B182, 0);
749    tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_SHARED_CONSTS, 0);
750    tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_UNKNOWN_0E12, 0x3200000);
751    tu_cs_emit_write_reg(cs, REG_A6XX_UCHE_CLIENT_PF, 4);
752    tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8E01, 0x0);
753    tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A982, 0);
754    tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A9A8, 0);
755    tu_cs_emit_write_reg(cs, REG_A6XX_SP_MODE_CONTROL,
756                         A6XX_SP_MODE_CONTROL_CONSTANT_DEMOTION_ENABLE | 4);
757 
758    /* TODO: set A6XX_VFD_ADD_OFFSET_INSTANCE and fix ir3 to avoid adding base instance */
759    tu_cs_emit_write_reg(cs, REG_A6XX_VFD_ADD_OFFSET, A6XX_VFD_ADD_OFFSET_VERTEX);
760    tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8811, 0x00000010);
761    tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x1f);
762 
763    tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8110, 0);
764 
765    tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8818, 0);
766    tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_8819, 0);
767    tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881A, 0);
768    tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881B, 0);
769    tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881C, 0);
770    tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881D, 0);
771    tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_881E, 0);
772    tu_cs_emit_write_reg(cs, REG_A6XX_RB_UNKNOWN_88F0, 0);
773 
774    tu_cs_emit_regs(cs, A6XX_VPC_POINT_COORD_INVERT(false));
775    tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9300, 0);
776 
777    tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(true));
778 
779    tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_A81B, 0);
780 
781    tu_cs_emit_write_reg(cs, REG_A6XX_SP_UNKNOWN_B183, 0);
782 
783    tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_8099, 0);
784    tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80A0, 2);
785    tu_cs_emit_write_reg(cs, REG_A6XX_GRAS_UNKNOWN_80AF, 0);
786    tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9210, 0);
787    tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9211, 0);
788    tu_cs_emit_write_reg(cs, REG_A6XX_VPC_UNKNOWN_9602, 0);
789    tu_cs_emit_write_reg(cs, REG_A6XX_PC_UNKNOWN_9E72, 0);
790    tu_cs_emit_write_reg(cs, REG_A6XX_SP_TP_UNKNOWN_B309, 0x000000a2);
791    tu_cs_emit_write_reg(cs, REG_A6XX_HLSQ_CONTROL_5_REG, 0xfc);
792 
793    tu_cs_emit_write_reg(cs, REG_A6XX_VFD_MODE_CNTL, 0x00000000);
794 
795    tu_cs_emit_write_reg(cs, REG_A6XX_PC_MODE_CNTL, 0x0000001f);
796 
797    tu_cs_emit_regs(cs, A6XX_RB_ALPHA_CONTROL());
798 
799    /* we don't use this yet.. probably best to disable.. */
800    tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
801    tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
802                      CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
803                      CP_SET_DRAW_STATE__0_GROUP_ID(0));
804    tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
805    tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));
806 
807    tu_cs_emit_regs(cs,
808                    A6XX_SP_TP_BORDER_COLOR_BASE_ADDR(.bo = &dev->global_bo,
809                                                      .bo_offset = gb_offset(bcolor_builtin)));
810    tu_cs_emit_regs(cs,
811                    A6XX_SP_PS_TP_BORDER_COLOR_BASE_ADDR(.bo = &dev->global_bo,
812                                                         .bo_offset = gb_offset(bcolor_builtin)));
813 
814    /* VSC buffers:
815     * Use the largest VSC pitches seen so far on this device. If there has
816     * been no overflow, a scratch bo of the right size will already have
817     * been allocated for these pitches.
818     *
819     * If overflow is detected, the stream size is doubled.
820     */
821    mtx_lock(&dev->mutex);
822 
823    struct tu6_global *global = dev->global_bo.map;
824 
825    uint32_t vsc_draw_overflow = global->vsc_draw_overflow;
826    uint32_t vsc_prim_overflow = global->vsc_prim_overflow;
827 
828    if (vsc_draw_overflow >= dev->vsc_draw_strm_pitch)
829       dev->vsc_draw_strm_pitch = (dev->vsc_draw_strm_pitch - VSC_PAD) * 2 + VSC_PAD;
830 
831    if (vsc_prim_overflow >= dev->vsc_prim_strm_pitch)
832       dev->vsc_prim_strm_pitch = (dev->vsc_prim_strm_pitch - VSC_PAD) * 2 + VSC_PAD;
833 
834    cmd->vsc_prim_strm_pitch = dev->vsc_prim_strm_pitch;
835    cmd->vsc_draw_strm_pitch = dev->vsc_draw_strm_pitch;
836 
837    mtx_unlock(&dev->mutex);
838 
839    struct tu_bo *vsc_bo;
840    uint32_t size0 = cmd->vsc_prim_strm_pitch * MAX_VSC_PIPES +
841                     cmd->vsc_draw_strm_pitch * MAX_VSC_PIPES;
842 
843    tu_get_scratch_bo(dev, size0 + MAX_VSC_PIPES * 4, &vsc_bo);
844 
845    tu_cs_emit_regs(cs,
846                    A6XX_VSC_DRAW_STRM_SIZE_ADDRESS(.bo = vsc_bo, .bo_offset = size0));
847    tu_cs_emit_regs(cs,
848                    A6XX_VSC_PRIM_STRM_ADDRESS(.bo = vsc_bo));
849    tu_cs_emit_regs(cs,
850                    A6XX_VSC_DRAW_STRM_ADDRESS(.bo = vsc_bo,
851                                               .bo_offset = cmd->vsc_prim_strm_pitch * MAX_VSC_PIPES));
852 
853    tu_cs_sanity_check(cs);
854 }
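
/* Illustrative sketch (hypothetical helper): the VSC stream-growth rule used
 * above. VSC_PAD bytes of padding are excluded from the doubling, so the
 * usable portion of each per-pipe stream (pitch - VSC_PAD) doubles on every
 * detected overflow.
 */
static uint32_t
example_grow_vsc_pitch(uint32_t pitch)
{
   return (pitch - VSC_PAD) * 2 + VSC_PAD;
}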
855 
856 static void
857 update_vsc_pipe(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
858 {
859    const struct tu_framebuffer *fb = cmd->state.framebuffer;
860 
861    tu_cs_emit_regs(cs,
862                    A6XX_VSC_BIN_SIZE(.width = fb->tile0.width,
863                                      .height = fb->tile0.height));
864 
865    tu_cs_emit_regs(cs,
866                    A6XX_VSC_BIN_COUNT(.nx = fb->tile_count.width,
867                                       .ny = fb->tile_count.height));
868 
869    tu_cs_emit_pkt4(cs, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
870    tu_cs_emit_array(cs, fb->pipe_config, 32);
871 
872    tu_cs_emit_regs(cs,
873                    A6XX_VSC_PRIM_STRM_PITCH(cmd->vsc_prim_strm_pitch),
874                    A6XX_VSC_PRIM_STRM_LIMIT(cmd->vsc_prim_strm_pitch - VSC_PAD));
875 
876    tu_cs_emit_regs(cs,
877                    A6XX_VSC_DRAW_STRM_PITCH(cmd->vsc_draw_strm_pitch),
878                    A6XX_VSC_DRAW_STRM_LIMIT(cmd->vsc_draw_strm_pitch - VSC_PAD));
879 }
880 
881 static void
882 emit_vsc_overflow_test(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
883 {
884    const struct tu_framebuffer *fb = cmd->state.framebuffer;
885    const uint32_t used_pipe_count =
886       fb->pipe_count.width * fb->pipe_count.height;
887 
888    for (int i = 0; i < used_pipe_count; i++) {
889       tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
890       tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
891             CP_COND_WRITE5_0_WRITE_MEMORY);
892       tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_DRAW_STRM_SIZE_REG(i)));
893       tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
894       tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_draw_strm_pitch - VSC_PAD));
895       tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
896       tu_cs_emit_qw(cs, global_iova(cmd, vsc_draw_overflow));
897       tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(cmd->vsc_draw_strm_pitch));
898 
899       tu_cs_emit_pkt7(cs, CP_COND_WRITE5, 8);
900       tu_cs_emit(cs, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
901             CP_COND_WRITE5_0_WRITE_MEMORY);
902       tu_cs_emit(cs, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_PRIM_STRM_SIZE_REG(i)));
903       tu_cs_emit(cs, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
904       tu_cs_emit(cs, CP_COND_WRITE5_3_REF(cmd->vsc_prim_strm_pitch - VSC_PAD));
905       tu_cs_emit(cs, CP_COND_WRITE5_4_MASK(~0));
906       tu_cs_emit_qw(cs, global_iova(cmd, vsc_prim_overflow));
907       tu_cs_emit(cs, CP_COND_WRITE5_7_WRITE_DATA(cmd->vsc_prim_strm_pitch));
908    }
909 
910    tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
911 }
912 
913 static void
914 tu6_emit_binning_pass(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
915 {
916    struct tu_physical_device *phys_dev = cmd->device->physical_device;
917    const struct tu_framebuffer *fb = cmd->state.framebuffer;
918 
919    tu6_emit_window_scissor(cs, 0, 0, fb->width - 1, fb->height - 1);
920 
921    tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
922    tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BINNING));
923 
924    tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
925    tu_cs_emit(cs, 0x1);
926 
927    tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
928    tu_cs_emit(cs, 0x1);
929 
930    tu_cs_emit_wfi(cs);
931 
932    tu_cs_emit_regs(cs,
933                    A6XX_VFD_MODE_CNTL(.binning_pass = true));
934 
935    update_vsc_pipe(cmd, cs);
936 
937    tu_cs_emit_regs(cs,
938                    A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->info.a6xx.magic.PC_UNKNOWN_9805));
939 
940    tu_cs_emit_regs(cs,
941                    A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->info.a6xx.magic.SP_UNKNOWN_A0F8));
942 
943    tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
944    tu_cs_emit(cs, UNK_2C);
945 
946    tu_cs_emit_regs(cs,
947                    A6XX_RB_WINDOW_OFFSET(.x = 0, .y = 0));
948 
949    tu_cs_emit_regs(cs,
950                    A6XX_SP_TP_WINDOW_OFFSET(.x = 0, .y = 0));
951 
952    /* emit IB to binning drawcmds: */
953    tu_cs_emit_call(cs, &cmd->draw_cs);
954 
955    tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3);
956    tu_cs_emit(cs, CP_SET_DRAW_STATE__0_COUNT(0) |
957                   CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
958                   CP_SET_DRAW_STATE__0_GROUP_ID(0));
959    tu_cs_emit(cs, CP_SET_DRAW_STATE__1_ADDR_LO(0));
960    tu_cs_emit(cs, CP_SET_DRAW_STATE__2_ADDR_HI(0));
961 
962    tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 1);
963    tu_cs_emit(cs, UNK_2D);
964 
965    /* This flush is probably required because the VSC, which produces the
966     * visibility stream, is a client of UCHE, whereas the CP needs to read the
967     * visibility stream (without caching) to do draw skipping. The
968     * WFI+WAIT_FOR_ME combination guarantees that the binning commands
969     * submitted are finished before reading the VSC regs (in
970     * emit_vsc_overflow_test) or the VSC_DATA buffer directly (implicitly as
971     * part of draws).
972     */
973    tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS);
974 
975    tu_cs_emit_wfi(cs);
976 
977    tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
978 
979    emit_vsc_overflow_test(cmd, cs);
980 
981    tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
982    tu_cs_emit(cs, 0x0);
983 
984    tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
985    tu_cs_emit(cs, 0x0);
986 }
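
/* Illustrative sketch (hypothetical helper): the synchronization tail of the
 * binning pass above, isolated as a pattern. CACHE_FLUSH_TS makes the VSC
 * output visible in memory, the WFI waits for that flush, and CP_WAIT_FOR_ME
 * keeps the CP itself from reading the VSC registers or the visibility
 * stream too early.
 */
static void
example_wait_for_binning_results(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
{
   tu6_emit_event_write(cmd, cs, CACHE_FLUSH_TS);
   tu_cs_emit_wfi(cs);
   tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
}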
987 
988 static struct tu_draw_state
989 tu_emit_input_attachments(struct tu_cmd_buffer *cmd,
990                           const struct tu_subpass *subpass,
991                           bool gmem)
992 {
993    /* Note: we can probably emit input attachments just once for the whole
994     * renderpass; this would avoid emitting both the sysmem and gmem versions.
995     *
996     * We emit two texture descriptors for each input as a workaround for
997     * d24s8/d32s8, which can be sampled as both float (depth) and integer
998     * (stencil). tu_shader lowers uint input attachment loads to use the 2nd
999     * descriptor in the pair.
1000     * TODO: a smarter workaround
1001     */
1002 
1003    if (!subpass->input_count)
1004       return (struct tu_draw_state) {};
1005 
1006    struct tu_cs_memory texture;
1007    VkResult result = tu_cs_alloc(&cmd->sub_cs, subpass->input_count * 2,
1008                                  A6XX_TEX_CONST_DWORDS, &texture);
1009    if (result != VK_SUCCESS) {
1010       cmd->record_result = result;
1011       return (struct tu_draw_state) {};
1012    }
1013 
1014    for (unsigned i = 0; i < subpass->input_count * 2; i++) {
1015       uint32_t a = subpass->input_attachments[i / 2].attachment;
1016       if (a == VK_ATTACHMENT_UNUSED)
1017          continue;
1018 
1019       struct tu_image_view *iview =
1020          cmd->state.framebuffer->attachments[a].attachment;
1021       const struct tu_render_pass_attachment *att =
1022          &cmd->state.pass->attachments[a];
1023       uint32_t *dst = &texture.map[A6XX_TEX_CONST_DWORDS * i];
1024       uint32_t gmem_offset = att->gmem_offset;
1025       uint32_t cpp = att->cpp;
1026 
1027       memcpy(dst, iview->descriptor, A6XX_TEX_CONST_DWORDS * 4);
1028 
1029       if (i % 2 == 1 && att->format == VK_FORMAT_D24_UNORM_S8_UINT) {
1030          /* note this works because spec says fb and input attachments
1031           * must use identity swizzle
1032           */
1033          dst[0] &= ~(A6XX_TEX_CONST_0_FMT__MASK |
1034             A6XX_TEX_CONST_0_SWIZ_X__MASK | A6XX_TEX_CONST_0_SWIZ_Y__MASK |
1035             A6XX_TEX_CONST_0_SWIZ_Z__MASK | A6XX_TEX_CONST_0_SWIZ_W__MASK);
1036          if (cmd->device->physical_device->limited_z24s8) {
1037             dst[0] |= A6XX_TEX_CONST_0_FMT(FMT6_8_8_8_8_UINT) |
1038                A6XX_TEX_CONST_0_SWIZ_X(A6XX_TEX_W) |
1039                A6XX_TEX_CONST_0_SWIZ_Y(A6XX_TEX_ZERO) |
1040                A6XX_TEX_CONST_0_SWIZ_Z(A6XX_TEX_ZERO) |
1041                A6XX_TEX_CONST_0_SWIZ_W(A6XX_TEX_ONE);
1042          } else {
1043             dst[0] |= A6XX_TEX_CONST_0_FMT(FMT6_Z24_UINT_S8_UINT) |
1044                A6XX_TEX_CONST_0_SWIZ_X(A6XX_TEX_Y) |
1045                A6XX_TEX_CONST_0_SWIZ_Y(A6XX_TEX_ZERO) |
1046                A6XX_TEX_CONST_0_SWIZ_Z(A6XX_TEX_ZERO) |
1047                A6XX_TEX_CONST_0_SWIZ_W(A6XX_TEX_ONE);
1048          }
1049       }
1050 
1051       if (i % 2 == 1 && att->format == VK_FORMAT_D32_SFLOAT_S8_UINT) {
1052          dst[0] &= ~A6XX_TEX_CONST_0_FMT__MASK;
1053          dst[0] |= A6XX_TEX_CONST_0_FMT(FMT6_8_UINT);
1054          dst[2] &= ~(A6XX_TEX_CONST_2_PITCHALIGN__MASK | A6XX_TEX_CONST_2_PITCH__MASK);
1055          dst[2] |= A6XX_TEX_CONST_2_PITCH(iview->stencil_PITCH << 6);
1056          dst[3] = 0;
1057          dst[4] = iview->stencil_base_addr;
1058          dst[5] = (dst[5] & 0xffff) | iview->stencil_base_addr >> 32;
1059 
1060          cpp = att->samples;
1061          gmem_offset = att->gmem_offset_stencil;
1062       }
1063 
1064       if (!gmem)
1065          continue;
1066 
1067       /* patched for gmem */
1068       dst[0] &= ~(A6XX_TEX_CONST_0_SWAP__MASK | A6XX_TEX_CONST_0_TILE_MODE__MASK);
1069       dst[0] |= A6XX_TEX_CONST_0_TILE_MODE(TILE6_2);
1070       dst[2] =
1071          A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D) |
1072          A6XX_TEX_CONST_2_PITCH(cmd->state.framebuffer->tile0.width * cpp);
1073       dst[3] = 0;
1074       dst[4] = cmd->device->physical_device->gmem_base + gmem_offset;
1075       dst[5] = A6XX_TEX_CONST_5_DEPTH(1);
1076       for (unsigned i = 6; i < A6XX_TEX_CONST_DWORDS; i++)
1077          dst[i] = 0;
1078    }
1079 
1080    struct tu_cs cs;
1081    struct tu_draw_state ds = tu_cs_draw_state(&cmd->sub_cs, &cs, 9);
1082 
1083    tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_FRAG, 3);
1084    tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(0) |
1085                   CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
1086                   CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
1087                   CP_LOAD_STATE6_0_STATE_BLOCK(SB6_FS_TEX) |
1088                   CP_LOAD_STATE6_0_NUM_UNIT(subpass->input_count * 2));
1089    tu_cs_emit_qw(&cs, texture.iova);
1090 
1091    tu_cs_emit_pkt4(&cs, REG_A6XX_SP_FS_TEX_CONST_LO, 2);
1092    tu_cs_emit_qw(&cs, texture.iova);
1093 
1094    tu_cs_emit_regs(&cs, A6XX_SP_FS_TEX_COUNT(subpass->input_count * 2));
1095 
1096    assert(cs.cur == cs.end); /* validate draw state size */
1097 
1098    return ds;
1099 }
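
/* Illustrative sketch (hypothetical helper): the descriptor-pair indexing
 * convention built above. Each input attachment gets two consecutive texture
 * descriptors; the second carries the integer/stencil view that tu_shader
 * points uint input-attachment loads at.
 */
static uint32_t
example_input_attachment_desc_index(uint32_t input_idx, bool use_uint_view)
{
   return input_idx * 2 + (use_uint_view ? 1 : 0);
}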
1100 
1101 static void
1102 tu_set_input_attachments(struct tu_cmd_buffer *cmd, const struct tu_subpass *subpass)
1103 {
1104    struct tu_cs *cs = &cmd->draw_cs;
1105 
1106    tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 6);
1107    tu_cs_emit_draw_state(cs, TU_DRAW_STATE_INPUT_ATTACHMENTS_GMEM,
1108                          tu_emit_input_attachments(cmd, subpass, true));
1109    tu_cs_emit_draw_state(cs, TU_DRAW_STATE_INPUT_ATTACHMENTS_SYSMEM,
1110                          tu_emit_input_attachments(cmd, subpass, false));
1111 }
1112 
1113 static void
1114 tu_emit_renderpass_begin(struct tu_cmd_buffer *cmd,
1115                          const VkRenderPassBeginInfo *info)
1116 {
1117    struct tu_cs *cs = &cmd->draw_cs;
1118 
1119    tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);
1120 
1121    tu6_emit_blit_scissor(cmd, cs, true);
1122 
1123    for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
1124       tu_load_gmem_attachment(cmd, cs, i, false);
1125 
1126    tu6_emit_blit_scissor(cmd, cs, false);
1127 
1128    for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
1129       tu_clear_gmem_attachment(cmd, cs, i, info);
1130 
1131    tu_cond_exec_end(cs);
1132 
1133    tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);
1134 
1135    for (uint32_t i = 0; i < cmd->state.pass->attachment_count; ++i)
1136       tu_clear_sysmem_attachment(cmd, cs, i, info);
1137 
1138    tu_cond_exec_end(cs);
1139 }
1140 
1141 static void
1142 tu6_sysmem_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1143 {
1144    const struct tu_framebuffer *fb = cmd->state.framebuffer;
1145 
1146    assert(fb->width > 0 && fb->height > 0);
1147    tu6_emit_window_scissor(cs, 0, 0, fb->width - 1, fb->height - 1);
1148    tu6_emit_window_offset(cs, 0, 0);
1149 
1150    tu6_emit_bin_size(cs, 0, 0, 0xc00000); /* 0xc00000 = BYPASS? */
1151 
1152    tu6_emit_event_write(cmd, cs, LRZ_FLUSH);
1153 
1154    tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
1155    tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));
1156 
1157    tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1158    tu_cs_emit(cs, 0x0);
1159 
1160    tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_SYSMEM);
1161 
1162    /* enable stream-out, with sysmem there is only one pass: */
1163    tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(false));
1164 
1165    tu_cs_emit_pkt7(cs, CP_SET_VISIBILITY_OVERRIDE, 1);
1166    tu_cs_emit(cs, 0x1);
1167 
1168    tu_cs_emit_pkt7(cs, CP_SET_MODE, 1);
1169    tu_cs_emit(cs, 0x0);
1170 
1171    tu_cs_sanity_check(cs);
1172 }
1173 
1174 static void
1175 tu6_sysmem_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1176 {
1177    /* Do any resolves of the last subpass. These are handled in the
1178     * tile_store_ib in the gmem path.
1179     */
1180    tu6_emit_sysmem_resolves(cmd, cs, cmd->state.subpass);
1181 
1182    tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);
1183 
1184    tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1185    tu_cs_emit(cs, 0x0);
1186 
1187    tu6_emit_event_write(cmd, cs, LRZ_FLUSH);
1188 
1189    tu_cs_sanity_check(cs);
1190 }
1191 
1192 static void
1193 tu6_tile_render_begin(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1194 {
1195    struct tu_physical_device *phys_dev = cmd->device->physical_device;
1196 
1197    tu6_emit_event_write(cmd, cs, LRZ_FLUSH);
1198 
1199    tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1200    tu_cs_emit(cs, 0x0);
1201 
1202    tu_emit_cache_flush_ccu(cmd, cs, TU_CMD_CCU_GMEM);
1203 
1204    const struct tu_framebuffer *fb = cmd->state.framebuffer;
1205    if (use_hw_binning(cmd)) {
1206       /* enable stream-out during binning pass: */
1207       tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(false));
1208 
1209       tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height,
1210                         A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);
1211 
1212       tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, true);
1213 
1214       tu6_emit_binning_pass(cmd, cs);
1215 
1216       /* and disable stream-out for draw pass: */
1217       tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(true));
1218 
1219       tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height,
1220                         A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);
1221 
1222       tu_cs_emit_regs(cs,
1223                       A6XX_VFD_MODE_CNTL(0));
1224 
1225       tu_cs_emit_regs(cs, A6XX_PC_UNKNOWN_9805(.unknown = phys_dev->info.a6xx.magic.PC_UNKNOWN_9805));
1226 
1227       tu_cs_emit_regs(cs, A6XX_SP_UNKNOWN_A0F8(.unknown = phys_dev->info.a6xx.magic.SP_UNKNOWN_A0F8));
1228 
1229       tu_cs_emit_pkt7(cs, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
1230       tu_cs_emit(cs, 0x1);
1231    } else {
1232       /* no binning pass, so enable stream-out for draw pass: */
1233       tu_cs_emit_regs(cs, A6XX_VPC_SO_DISABLE(false));
1234 
1235       tu6_emit_bin_size(cs, fb->tile0.width, fb->tile0.height, 0x6000000);
1236    }
1237 
1238    tu_cs_sanity_check(cs);
1239 }
1240 
1241 static void
1242 tu6_render_tile(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1243 {
1244    tu_cs_emit_call(cs, &cmd->draw_cs);
1245 
1246    if (use_hw_binning(cmd)) {
1247       tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
1248       tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_ENDVIS));
1249    }
1250 
1251    tu_cs_emit_ib(cs, &cmd->state.tile_store_ib);
1252 
1253    tu_cs_sanity_check(cs);
1254 }
1255 
1256 static void
1257 tu6_tile_render_end(struct tu_cmd_buffer *cmd, struct tu_cs *cs)
1258 {
1259    tu_cs_emit_call(cs, &cmd->draw_epilogue_cs);
1260 
1261    tu_cs_emit_regs(cs,
1262                    A6XX_GRAS_LRZ_CNTL(0));
1263 
1264    tu6_emit_event_write(cmd, cs, LRZ_FLUSH);
1265 
1266    tu6_emit_event_write(cmd, cs, PC_CCU_RESOLVE_TS);
1267 
1268    tu_cs_sanity_check(cs);
1269 }
1270 
1271 static void
1272 tu_cmd_render_tiles(struct tu_cmd_buffer *cmd)
1273 {
1274    const struct tu_framebuffer *fb = cmd->state.framebuffer;
1275 
1276    tu6_tile_render_begin(cmd, &cmd->cs);
1277 
1278    uint32_t pipe = 0;
1279    for (uint32_t py = 0; py < fb->pipe_count.height; py++) {
1280       for (uint32_t px = 0; px < fb->pipe_count.width; px++, pipe++) {
1281          uint32_t tx1 = px * fb->pipe0.width;
1282          uint32_t ty1 = py * fb->pipe0.height;
1283          uint32_t tx2 = MIN2(tx1 + fb->pipe0.width, fb->tile_count.width);
1284          uint32_t ty2 = MIN2(ty1 + fb->pipe0.height, fb->tile_count.height);
1285          uint32_t slot = 0;
1286          for (uint32_t ty = ty1; ty < ty2; ty++) {
1287             for (uint32_t tx = tx1; tx < tx2; tx++, slot++) {
1288                tu6_emit_tile_select(cmd, &cmd->cs, tx, ty, pipe, slot);
1289                tu6_render_tile(cmd, &cmd->cs);
1290             }
1291          }
1292       }
1293    }
1294 
1295    tu6_tile_render_end(cmd, &cmd->cs);
1296 }
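
/* Illustrative sketch (hypothetical helper, assuming (tx, ty) lies inside
 * the framebuffer): the (pipe, slot) numbering implied by the loops above.
 * Pipes cover pipe0-sized blocks of tiles, row by row; within a pipe, slots
 * advance across each row of tiles.
 */
static void
example_tile_to_pipe_slot(const struct tu_framebuffer *fb,
                          uint32_t tx, uint32_t ty,
                          uint32_t *pipe, uint32_t *slot)
{
   const uint32_t px = tx / fb->pipe0.width;
   const uint32_t py = ty / fb->pipe0.height;
   const uint32_t tx1 = px * fb->pipe0.width;
   const uint32_t ty1 = py * fb->pipe0.height;
   const uint32_t tx2 = MIN2(tx1 + fb->pipe0.width, fb->tile_count.width);

   *pipe = py * fb->pipe_count.width + px;
   *slot = (ty - ty1) * (tx2 - tx1) + (tx - tx1);
}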
1297 
1298 static void
1299 tu_cmd_render_sysmem(struct tu_cmd_buffer *cmd)
1300 {
1301    tu6_sysmem_render_begin(cmd, &cmd->cs);
1302 
1303    tu_cs_emit_call(&cmd->cs, &cmd->draw_cs);
1304 
1305    tu6_sysmem_render_end(cmd, &cmd->cs);
1306 }
1307 
1308 static void
1309 tu_cmd_prepare_tile_store_ib(struct tu_cmd_buffer *cmd)
1310 {
1311    const uint32_t tile_store_space = 11 + (35 * 2) * cmd->state.pass->attachment_count;
1312    struct tu_cs sub_cs;
1313 
1314    VkResult result =
1315       tu_cs_begin_sub_stream(&cmd->sub_cs, tile_store_space, &sub_cs);
1316    if (result != VK_SUCCESS) {
1317       cmd->record_result = result;
1318       return;
1319    }
1320 
1321    /* emit to tile-store sub_cs */
1322    tu6_emit_tile_store(cmd, &sub_cs);
1323 
1324    cmd->state.tile_store_ib = tu_cs_end_sub_stream(&cmd->sub_cs, &sub_cs);
1325 }
1326 
1327 static VkResult
1328 tu_create_cmd_buffer(struct tu_device *device,
1329                      struct tu_cmd_pool *pool,
1330                      VkCommandBufferLevel level,
1331                      VkCommandBuffer *pCommandBuffer)
1332 {
1333    struct tu_cmd_buffer *cmd_buffer;
1334 
1335    cmd_buffer = vk_object_zalloc(&device->vk, NULL, sizeof(*cmd_buffer),
1336                                  VK_OBJECT_TYPE_COMMAND_BUFFER);
1337    if (cmd_buffer == NULL)
1338       return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1339 
1340    cmd_buffer->device = device;
1341    cmd_buffer->pool = pool;
1342    cmd_buffer->level = level;
1343 
1344    if (pool) {
1345       list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
1346       cmd_buffer->queue_family_index = pool->queue_family_index;
1347 
1348    } else {
1349       /* Init the pool_link so we can safely call list_del when we destroy
1350        * the command buffer
1351        */
1352       list_inithead(&cmd_buffer->pool_link);
1353       cmd_buffer->queue_family_index = TU_QUEUE_GENERAL;
1354    }
1355 
1356    tu_cs_init(&cmd_buffer->cs, device, TU_CS_MODE_GROW, 4096);
1357    tu_cs_init(&cmd_buffer->draw_cs, device, TU_CS_MODE_GROW, 4096);
1358    tu_cs_init(&cmd_buffer->draw_epilogue_cs, device, TU_CS_MODE_GROW, 4096);
1359    tu_cs_init(&cmd_buffer->sub_cs, device, TU_CS_MODE_SUB_STREAM, 2048);
1360 
1361    *pCommandBuffer = tu_cmd_buffer_to_handle(cmd_buffer);
1362 
1363    return VK_SUCCESS;
1364 }
1365 
1366 static void
1367 tu_cmd_buffer_destroy(struct tu_cmd_buffer *cmd_buffer)
1368 {
1369    list_del(&cmd_buffer->pool_link);
1370 
1371    tu_cs_finish(&cmd_buffer->cs);
1372    tu_cs_finish(&cmd_buffer->draw_cs);
1373    tu_cs_finish(&cmd_buffer->draw_epilogue_cs);
1374    tu_cs_finish(&cmd_buffer->sub_cs);
1375 
1376    vk_object_free(&cmd_buffer->device->vk, &cmd_buffer->pool->alloc, cmd_buffer);
1377 }
1378 
1379 static VkResult
1380 tu_reset_cmd_buffer(struct tu_cmd_buffer *cmd_buffer)
1381 {
1382    cmd_buffer->record_result = VK_SUCCESS;
1383 
1384    tu_cs_reset(&cmd_buffer->cs);
1385    tu_cs_reset(&cmd_buffer->draw_cs);
1386    tu_cs_reset(&cmd_buffer->draw_epilogue_cs);
1387    tu_cs_reset(&cmd_buffer->sub_cs);
1388 
1389    for (unsigned i = 0; i < MAX_BIND_POINTS; i++)
1390       memset(&cmd_buffer->descriptors[i].sets, 0, sizeof(cmd_buffer->descriptors[i].sets));
1391 
1392    cmd_buffer->status = TU_CMD_BUFFER_STATUS_INITIAL;
1393 
1394    return cmd_buffer->record_result;
1395 }
1396 
1397 VkResult
1398 tu_AllocateCommandBuffers(VkDevice _device,
1399                           const VkCommandBufferAllocateInfo *pAllocateInfo,
1400                           VkCommandBuffer *pCommandBuffers)
1401 {
1402    TU_FROM_HANDLE(tu_device, device, _device);
1403    TU_FROM_HANDLE(tu_cmd_pool, pool, pAllocateInfo->commandPool);
1404 
1405    VkResult result = VK_SUCCESS;
1406    uint32_t i;
1407 
1408    for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
1409 
1410       if (!list_is_empty(&pool->free_cmd_buffers)) {
1411          struct tu_cmd_buffer *cmd_buffer = list_first_entry(
1412             &pool->free_cmd_buffers, struct tu_cmd_buffer, pool_link);
1413 
1414          list_del(&cmd_buffer->pool_link);
1415          list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
1416 
1417          result = tu_reset_cmd_buffer(cmd_buffer);
1418          cmd_buffer->level = pAllocateInfo->level;
1419 
1420          pCommandBuffers[i] = tu_cmd_buffer_to_handle(cmd_buffer);
1421       } else {
1422          result = tu_create_cmd_buffer(device, pool, pAllocateInfo->level,
1423                                        &pCommandBuffers[i]);
1424       }
1425       if (result != VK_SUCCESS)
1426          break;
1427    }
1428 
1429    if (result != VK_SUCCESS) {
1430       tu_FreeCommandBuffers(_device, pAllocateInfo->commandPool, i,
1431                             pCommandBuffers);
1432 
1433       /* From the Vulkan 1.0.66 spec:
1434        *
1435        * "vkAllocateCommandBuffers can be used to create multiple
1436        *  command buffers. If the creation of any of those command
1437        *  buffers fails, the implementation must destroy all
1438        *  successfully created command buffer objects from this
1439        *  command, set all entries of the pCommandBuffers array to
1440        *  NULL and return the error."
1441        */
1442       memset(pCommandBuffers, 0,
1443              sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
1444    }
1445 
1446    return result;
1447 }
1448 
1449 void
1450 tu_FreeCommandBuffers(VkDevice device,
1451                       VkCommandPool commandPool,
1452                       uint32_t commandBufferCount,
1453                       const VkCommandBuffer *pCommandBuffers)
1454 {
1455    for (uint32_t i = 0; i < commandBufferCount; i++) {
1456       TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
1457 
1458       if (cmd_buffer) {
1459          if (cmd_buffer->pool) {
1460             list_del(&cmd_buffer->pool_link);
1461             list_addtail(&cmd_buffer->pool_link,
1462                          &cmd_buffer->pool->free_cmd_buffers);
1463          } else
1464             tu_cmd_buffer_destroy(cmd_buffer);
1465       }
1466    }
1467 }
1468 
1469 VkResult
1470 tu_ResetCommandBuffer(VkCommandBuffer commandBuffer,
1471                       VkCommandBufferResetFlags flags)
1472 {
1473    TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
1474    return tu_reset_cmd_buffer(cmd_buffer);
1475 }
1476 
1477 /* Initialize the cache, assuming all necessary flushes have happened but *not*
1478  * invalidations.
1479  */
1480 static void
1481 tu_cache_init(struct tu_cache_state *cache)
1482 {
1483    cache->flush_bits = 0;
1484    cache->pending_flush_bits = TU_CMD_FLAG_ALL_INVALIDATE;
1485 }
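
/* Illustrative note (not from the original source): starting with every
 * invalidate bit pending means the first read through each cache domain
 * after this reset will pick up the matching invalidate, while no flushes
 * are owed yet because nothing has been written through those caches.
 */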
1486 
1487 VkResult
1488 tu_BeginCommandBuffer(VkCommandBuffer commandBuffer,
1489                       const VkCommandBufferBeginInfo *pBeginInfo)
1490 {
1491    TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
1492    VkResult result = VK_SUCCESS;
1493 
1494    if (cmd_buffer->status != TU_CMD_BUFFER_STATUS_INITIAL) {
1495       /* If the command buffer has already been reset with
1496        * vkResetCommandBuffer, there is no need to do it again.
1497        */
1498       result = tu_reset_cmd_buffer(cmd_buffer);
1499       if (result != VK_SUCCESS)
1500          return result;
1501    }
1502 
1503    memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
1504    cmd_buffer->state.index_size = 0xff; /* dirty restart index */
1505 
1506    tu_cache_init(&cmd_buffer->state.cache);
1507    tu_cache_init(&cmd_buffer->state.renderpass_cache);
1508    cmd_buffer->usage_flags = pBeginInfo->flags;
1509 
1510    tu_cs_begin(&cmd_buffer->cs);
1511    tu_cs_begin(&cmd_buffer->draw_cs);
1512    tu_cs_begin(&cmd_buffer->draw_epilogue_cs);
1513 
1514    /* Set up the initial configuration for the command buffer. */
1515    if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
1516       switch (cmd_buffer->queue_family_index) {
1517       case TU_QUEUE_GENERAL:
1518          tu6_init_hw(cmd_buffer, &cmd_buffer->cs);
1519          break;
1520       default:
1521          break;
1522       }
1523    } else if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
1524       assert(pBeginInfo->pInheritanceInfo);
1525 
1526       vk_foreach_struct(ext, pBeginInfo->pInheritanceInfo) {
1527          switch (ext->sType) {
1528          case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT: {
1529             const VkCommandBufferInheritanceConditionalRenderingInfoEXT *cond_rend = (void *) ext;
1530             cmd_buffer->state.predication_active = cond_rend->conditionalRenderingEnable;
1531             break;
1532          }
1533          default:
1534             break;
1535          }
1536       }
1537 
1538       if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
1539          cmd_buffer->state.pass = tu_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
1540          cmd_buffer->state.subpass =
1541             &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
1542       } else {
1543          /* When executing in the middle of another command buffer, the CCU
1544           * state is unknown.
1545           */
1546          cmd_buffer->state.ccu_state = TU_CMD_CCU_UNKNOWN;
1547       }
1548    }
1549 
1550    cmd_buffer->status = TU_CMD_BUFFER_STATUS_RECORDING;
1551 
1552    return VK_SUCCESS;
1553 }
1554 
1555 void
1556 tu_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
1557                         uint32_t firstBinding,
1558                         uint32_t bindingCount,
1559                         const VkBuffer *pBuffers,
1560                         const VkDeviceSize *pOffsets)
1561 {
1562    tu_CmdBindVertexBuffers2EXT(commandBuffer, firstBinding, bindingCount,
1563                                pBuffers, pOffsets, NULL, NULL);
1564 }
1565 
1566 void
1567 tu_CmdBindVertexBuffers2EXT(VkCommandBuffer commandBuffer,
1568                             uint32_t firstBinding,
1569                             uint32_t bindingCount,
1570                             const VkBuffer* pBuffers,
1571                             const VkDeviceSize* pOffsets,
1572                             const VkDeviceSize* pSizes,
1573                             const VkDeviceSize* pStrides)
1574 {
1575    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1576    struct tu_cs cs;
1577    /* TODO: track a "max_vb" value for the cmdbuf to save a bit of memory  */
1578    cmd->state.vertex_buffers.iova = tu_cs_draw_state(&cmd->sub_cs, &cs, 4 * MAX_VBS).iova;
1579 
1580    for (uint32_t i = 0; i < bindingCount; i++) {
1581       struct tu_buffer *buf = tu_buffer_from_handle(pBuffers[i]);
1582 
1583       cmd->state.vb[firstBinding + i].base = tu_buffer_iova(buf) + pOffsets[i];
1584       cmd->state.vb[firstBinding + i].size = pSizes ? pSizes[i] : (buf->size - pOffsets[i]);
1585       if (pStrides)
1586          cmd->state.vb[firstBinding + i].stride = pStrides[i];
1587    }
1588 
1589    for (uint32_t i = 0; i < MAX_VBS; i++) {
1590       tu_cs_emit_regs(&cs,
1591                       A6XX_VFD_FETCH_BASE_LO(i, cmd->state.vb[i].base),
1592                       A6XX_VFD_FETCH_BASE_HI(i, cmd->state.vb[i].base >> 32),
1593                       A6XX_VFD_FETCH_SIZE(i, cmd->state.vb[i].size));
1594    }
1595 
1596    cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;
1597 
1598    if (pStrides) {
1599       cmd->state.dynamic_state[TU_DYNAMIC_STATE_VB_STRIDE].iova =
1600          tu_cs_draw_state(&cmd->sub_cs, &cs, 2 * MAX_VBS).iova;
1601 
1602       for (uint32_t i = 0; i < MAX_VBS; i++)
1603          tu_cs_emit_regs(&cs, A6XX_VFD_FETCH_STRIDE(i, cmd->state.vb[i].stride));
1604 
1605       cmd->state.dirty |= TU_CMD_DIRTY_VB_STRIDE;
1606    }
1607 }
1608 
1609 void
1610 tu_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
1611                       VkBuffer buffer,
1612                       VkDeviceSize offset,
1613                       VkIndexType indexType)
1614 {
1615    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1616    TU_FROM_HANDLE(tu_buffer, buf, buffer);
1617 
1618 
1619 
1620    uint32_t index_size, index_shift, restart_index;
1621 
1622    switch (indexType) {
1623    case VK_INDEX_TYPE_UINT16:
1624       index_size = INDEX4_SIZE_16_BIT;
1625       index_shift = 1;
1626       restart_index = 0xffff;
1627       break;
1628    case VK_INDEX_TYPE_UINT32:
1629       index_size = INDEX4_SIZE_32_BIT;
1630       index_shift = 2;
1631       restart_index = 0xffffffff;
1632       break;
1633    case VK_INDEX_TYPE_UINT8_EXT:
1634       index_size = INDEX4_SIZE_8_BIT;
1635       index_shift = 0;
1636       restart_index = 0xff;
1637       break;
1638    default:
1639       unreachable("invalid VkIndexType");
1640    }
1641 
1642    /* initialize/update the restart index */
1643    if (cmd->state.index_size != index_size)
1644       tu_cs_emit_regs(&cmd->draw_cs, A6XX_PC_RESTART_INDEX(restart_index));
1645 
1646    assert(buf->size >= offset);
1647 
1648    cmd->state.index_va = buf->bo->iova + buf->bo_offset + offset;
1649    cmd->state.max_index_count = (buf->size - offset) >> index_shift;
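   /* Illustrative example (not from the original source): a 64 KiB index
    * buffer bound at offset 16 with VK_INDEX_TYPE_UINT16 has index_shift 1,
    * so max_index_count = (65536 - 16) >> 1 = 32760 addressable indices.
    */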
1650    cmd->state.index_size = index_size;
1651 }
1652 
1653 void
1654 tu_CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
1655                          VkPipelineBindPoint pipelineBindPoint,
1656                          VkPipelineLayout _layout,
1657                          uint32_t firstSet,
1658                          uint32_t descriptorSetCount,
1659                          const VkDescriptorSet *pDescriptorSets,
1660                          uint32_t dynamicOffsetCount,
1661                          const uint32_t *pDynamicOffsets)
1662 {
1663    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1664    TU_FROM_HANDLE(tu_pipeline_layout, layout, _layout);
1665    unsigned dyn_idx = 0;
1666 
1667    struct tu_descriptor_state *descriptors_state =
1668       tu_get_descriptors_state(cmd, pipelineBindPoint);
1669 
1670    for (unsigned i = 0; i < descriptorSetCount; ++i) {
1671       unsigned idx = i + firstSet;
1672       TU_FROM_HANDLE(tu_descriptor_set, set, pDescriptorSets[i]);
1673 
1674       descriptors_state->sets[idx] = set;
1675 
1676       for(unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
1677          /* update the contents of the dynamic descriptor set */
1678          unsigned src_idx = j;
1679          unsigned dst_idx = j + layout->set[idx].dynamic_offset_start;
1680          assert(dyn_idx < dynamicOffsetCount);
1681 
1682          uint32_t *dst =
1683             &descriptors_state->dynamic_descriptors[dst_idx * A6XX_TEX_CONST_DWORDS];
1684          uint32_t *src =
1685             &set->dynamic_descriptors[src_idx * A6XX_TEX_CONST_DWORDS];
1686          uint32_t offset = pDynamicOffsets[dyn_idx];
1687 
1688          /* Patch the storage/uniform descriptors right away. */
1689          if (layout->set[idx].layout->dynamic_ubo & (1 << j)) {
1690             /* Note: we can assume here that the addition won't roll over and
1691              * change the SIZE field.
1692              */
1693             uint64_t va = src[0] | ((uint64_t)src[1] << 32);
1694             va += offset;
1695             dst[0] = va;
1696             dst[1] = va >> 32;
1697          } else {
1698             memcpy(dst, src, A6XX_TEX_CONST_DWORDS * 4);
1699             /* Note: A6XX_IBO_5_DEPTH is always 0 */
1700             uint64_t va = dst[4] | ((uint64_t)dst[5] << 32);
1701             va += offset;
1702             dst[4] = va;
1703             dst[5] = va >> 32;
1704          }
1705       }
1706    }
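   /* Worked example (illustrative, not from the original source): a dynamic
    * UBO descriptor whose base iova is 0x100000, bound with
    * pDynamicOffsets[n] == 256, is rewritten so dst[0]/dst[1] describe
    * address 0x100100; the small offset cannot carry into the high bits, so
    * the SIZE field mentioned above stays intact.
    */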
1707    assert(dyn_idx == dynamicOffsetCount);
1708 
1709    uint32_t sp_bindless_base_reg, hlsq_bindless_base_reg, hlsq_invalidate_value;
1710    uint64_t addr[MAX_SETS + 1] = {};
1711    struct tu_cs *cs, state_cs;
1712 
1713    for (uint32_t i = 0; i < MAX_SETS; i++) {
1714       struct tu_descriptor_set *set = descriptors_state->sets[i];
1715       if (set)
1716          addr[i] = set->va | 3;
1717    }
1718 
1719    if (layout->dynamic_offset_count) {
1720       /* allocate and fill out dynamic descriptor set */
1721       struct tu_cs_memory dynamic_desc_set;
1722       VkResult result = tu_cs_alloc(&cmd->sub_cs, layout->dynamic_offset_count,
1723                                     A6XX_TEX_CONST_DWORDS, &dynamic_desc_set);
1724       if (result != VK_SUCCESS) {
1725          cmd->record_result = result;
1726          return;
1727       }
1728 
1729       memcpy(dynamic_desc_set.map, descriptors_state->dynamic_descriptors,
1730              layout->dynamic_offset_count * A6XX_TEX_CONST_DWORDS * 4);
1731       addr[MAX_SETS] = dynamic_desc_set.iova | 3;
1732    }
1733 
1734    if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
1735       sp_bindless_base_reg = REG_A6XX_SP_BINDLESS_BASE(0);
1736       hlsq_bindless_base_reg = REG_A6XX_HLSQ_BINDLESS_BASE(0);
1737       hlsq_invalidate_value = A6XX_HLSQ_INVALIDATE_CMD_GFX_BINDLESS(0x1f);
1738 
1739       cmd->state.desc_sets = tu_cs_draw_state(&cmd->sub_cs, &state_cs, 24);
1740       cmd->state.dirty |= TU_CMD_DIRTY_DESC_SETS_LOAD | TU_CMD_DIRTY_SHADER_CONSTS;
1741       cs = &state_cs;
1742    } else {
1743       assert(pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE);
1744 
1745       sp_bindless_base_reg = REG_A6XX_SP_CS_BINDLESS_BASE(0);
1746       hlsq_bindless_base_reg = REG_A6XX_HLSQ_CS_BINDLESS_BASE(0);
1747       hlsq_invalidate_value = A6XX_HLSQ_INVALIDATE_CMD_CS_BINDLESS(0x1f);
1748 
1749       cmd->state.dirty |= TU_CMD_DIRTY_COMPUTE_DESC_SETS_LOAD;
1750       cs = &cmd->cs;
1751    }
1752 
1753    tu_cs_emit_pkt4(cs, sp_bindless_base_reg, 10);
1754    tu_cs_emit_array(cs, (const uint32_t*) addr, 10);
1755    tu_cs_emit_pkt4(cs, hlsq_bindless_base_reg, 10);
1756    tu_cs_emit_array(cs, (const uint32_t*) addr, 10);
1757    tu_cs_emit_regs(cs, A6XX_HLSQ_INVALIDATE_CMD(.dword = hlsq_invalidate_value));
1758 
1759    if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
1760       assert(cs->cur == cs->end); /* validate draw state size */
1761       tu_cs_emit_pkt7(&cmd->draw_cs, CP_SET_DRAW_STATE, 3);
1762       tu_cs_emit_draw_state(&cmd->draw_cs, TU_DRAW_STATE_DESC_SETS, cmd->state.desc_sets);
1763    }
1764 }
1765 
1766 void tu_CmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer,
1767                                 VkPipelineBindPoint pipelineBindPoint,
1768                                 VkPipelineLayout _layout,
1769                                 uint32_t _set,
1770                                 uint32_t descriptorWriteCount,
1771                                 const VkWriteDescriptorSet *pDescriptorWrites)
1772 {
1773    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1774    TU_FROM_HANDLE(tu_pipeline_layout, pipe_layout, _layout);
1775    struct tu_descriptor_set_layout *layout = pipe_layout->set[_set].layout;
1776    struct tu_descriptor_set *set =
1777       &tu_get_descriptors_state(cmd, pipelineBindPoint)->push_set;
1778 
1779    struct tu_cs_memory set_mem;
1780    VkResult result = tu_cs_alloc(&cmd->sub_cs,
1781                                  DIV_ROUND_UP(layout->size, A6XX_TEX_CONST_DWORDS * 4),
1782                                  A6XX_TEX_CONST_DWORDS, &set_mem);
1783    if (result != VK_SUCCESS) {
1784       cmd->record_result = result;
1785       return;
1786    }
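   /* Illustrative sizing note (not from the original source, and assuming
    * A6XX_TEX_CONST_DWORDS is 16, i.e. 64 bytes per unit): a set layout of
    * 96 bytes allocates DIV_ROUND_UP(96, 64) = 2 units from the sub_cs here.
    */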
1787 
1788    /* preserve previous content if the layout is the same: */
1789    if (set->layout == layout)
1790       memcpy(set_mem.map, set->mapped_ptr, MIN2(set->size, layout->size));
1791 
1792    set->layout = layout;
1793    set->mapped_ptr = set_mem.map;
1794    set->va = set_mem.iova;
1795 
1796    tu_update_descriptor_sets(tu_descriptor_set_to_handle(set),
1797                              descriptorWriteCount, pDescriptorWrites, 0, NULL);
1798 
1799    tu_CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, _layout, _set,
1800                             1, (VkDescriptorSet[]) { tu_descriptor_set_to_handle(set) },
1801                             0, NULL);
1802 }
1803 
1804 void tu_CmdPushDescriptorSetWithTemplateKHR(
1805    VkCommandBuffer commandBuffer,
1806    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
1807    VkPipelineLayout _layout,
1808    uint32_t _set,
1809    const void* pData)
1810 {
1811    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1812    TU_FROM_HANDLE(tu_pipeline_layout, pipe_layout, _layout);
1813    TU_FROM_HANDLE(tu_descriptor_update_template, templ, descriptorUpdateTemplate);
1814    struct tu_descriptor_set_layout *layout = pipe_layout->set[_set].layout;
1815    struct tu_descriptor_set *set =
1816       &tu_get_descriptors_state(cmd, templ->bind_point)->push_set;
1817 
1818    struct tu_cs_memory set_mem;
1819    VkResult result = tu_cs_alloc(&cmd->sub_cs,
1820                                  DIV_ROUND_UP(layout->size, A6XX_TEX_CONST_DWORDS * 4),
1821                                  A6XX_TEX_CONST_DWORDS, &set_mem);
1822    if (result != VK_SUCCESS) {
1823       cmd->record_result = result;
1824       return;
1825    }
1826 
1827    /* preserve previous content if the layout is the same: */
1828    if (set->layout == layout)
1829       memcpy(set_mem.map, set->mapped_ptr, MIN2(set->size, layout->size));
1830 
1831    set->layout = layout;
1832    set->mapped_ptr = set_mem.map;
1833    set->va = set_mem.iova;
1834 
1835    tu_update_descriptor_set_with_template(set, descriptorUpdateTemplate, pData);
1836 
1837    tu_CmdBindDescriptorSets(commandBuffer, templ->bind_point, _layout, _set,
1838                             1, (VkDescriptorSet[]) { tu_descriptor_set_to_handle(set) },
1839                             0, NULL);
1840 }
1841 
1842 void tu_CmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer,
1843                                            uint32_t firstBinding,
1844                                            uint32_t bindingCount,
1845                                            const VkBuffer *pBuffers,
1846                                            const VkDeviceSize *pOffsets,
1847                                            const VkDeviceSize *pSizes)
1848 {
1849    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1850    struct tu_cs *cs = &cmd->draw_cs;
1851 
1852    /* Using COND_REG_EXEC for xfb commands matches the blob behavior;
1853     * presumably there isn't any benefit to using a draw state when the
1854     * condition is (SYSMEM | BINNING).
1855     */
1856    tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
1857                           CP_COND_REG_EXEC_0_SYSMEM |
1858                           CP_COND_REG_EXEC_0_BINNING);
1859 
1860    for (uint32_t i = 0; i < bindingCount; i++) {
1861       TU_FROM_HANDLE(tu_buffer, buf, pBuffers[i]);
1862       uint64_t iova = buf->bo->iova + pOffsets[i];
1863       uint32_t size = buf->bo->size - pOffsets[i];
1864       uint32_t idx = i + firstBinding;
1865 
1866       if (pSizes && pSizes[i] != VK_WHOLE_SIZE)
1867          size = pSizes[i];
1868 
1869       /* BUFFER_BASE is 32-byte aligned; add the remaining offset to BUFFER_OFFSET */
1870       uint32_t offset = iova & 0x1f;
1871       iova &= ~(uint64_t) 0x1f;
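      /* Illustrative example (not from the original source): an iova of
       * 0x1000002c yields offset = 0xc and a 32-byte aligned base of
       * 0x10000020; the size programmed below is grown by that offset so
       * the end of the buffer is still covered.
       */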
1872 
1873       tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_BUFFER_BASE(idx), 3);
1874       tu_cs_emit_qw(cs, iova);
1875       tu_cs_emit(cs, size + offset);
1876 
1877       cmd->state.streamout_offset[idx] = offset;
1878    }
1879 
1880    tu_cond_exec_end(cs);
1881 }
1882 
1883 void
1884 tu_CmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer,
1885                                 uint32_t firstCounterBuffer,
1886                                 uint32_t counterBufferCount,
1887                                 const VkBuffer *pCounterBuffers,
1888                                 const VkDeviceSize *pCounterBufferOffsets)
1889 {
1890    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1891    struct tu_cs *cs = &cmd->draw_cs;
1892 
1893    tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
1894                           CP_COND_REG_EXEC_0_SYSMEM |
1895                           CP_COND_REG_EXEC_0_BINNING);
1896 
1897    /* TODO: only update offset for active buffers */
1898    for (uint32_t i = 0; i < IR3_MAX_SO_BUFFERS; i++)
1899       tu_cs_emit_regs(cs, A6XX_VPC_SO_BUFFER_OFFSET(i, cmd->state.streamout_offset[i]));
1900 
1901    for (uint32_t i = 0; i < (pCounterBuffers ? counterBufferCount : 0); i++) {
1902       uint32_t idx = firstCounterBuffer + i;
1903       uint32_t offset = cmd->state.streamout_offset[idx];
1904 
1905       if (!pCounterBuffers[i])
1906          continue;
1907 
1908       TU_FROM_HANDLE(tu_buffer, buf, pCounterBuffers[i]);
1909 
1910       tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
1911       tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(idx)) |
1912                      CP_MEM_TO_REG_0_UNK31 |
1913                      CP_MEM_TO_REG_0_CNT(1));
1914       tu_cs_emit_qw(cs, buf->bo->iova + pCounterBufferOffsets[i]);
1915 
1916       if (offset) {
1917          tu_cs_emit_pkt7(cs, CP_REG_RMW, 3);
1918          tu_cs_emit(cs, CP_REG_RMW_0_DST_REG(REG_A6XX_VPC_SO_BUFFER_OFFSET(idx)) |
1919                         CP_REG_RMW_0_SRC1_ADD);
1920          tu_cs_emit_qw(cs, 0xffffffff);
1921          tu_cs_emit_qw(cs, offset);
1922       }
1923    }
1924 
1925    tu_cond_exec_end(cs);
1926 }
1927 
1928 void tu_CmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer,
1929                                        uint32_t firstCounterBuffer,
1930                                        uint32_t counterBufferCount,
1931                                        const VkBuffer *pCounterBuffers,
1932                                        const VkDeviceSize *pCounterBufferOffsets)
1933 {
1934    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1935    struct tu_cs *cs = &cmd->draw_cs;
1936 
1937    tu_cond_exec_start(cs, CP_COND_REG_EXEC_0_MODE(RENDER_MODE) |
1938                           CP_COND_REG_EXEC_0_SYSMEM |
1939                           CP_COND_REG_EXEC_0_BINNING);
1940 
1941    /* TODO: only flush buffers that need to be flushed */
1942    for (uint32_t i = 0; i < IR3_MAX_SO_BUFFERS; i++) {
1943       /* note: FLUSH_BASE is always the same, so it could go in init_hw()? */
1944       tu_cs_emit_pkt4(cs, REG_A6XX_VPC_SO_FLUSH_BASE(i), 2);
1945       tu_cs_emit_qw(cs, global_iova(cmd, flush_base[i]));
1946       tu6_emit_event_write(cmd, cs, FLUSH_SO_0 + i);
1947    }
1948 
1949    for (uint32_t i = 0; i < (pCounterBuffers ? counterBufferCount : 0); i++) {
1950       uint32_t idx = firstCounterBuffer + i;
1951       uint32_t offset = cmd->state.streamout_offset[idx];
1952 
1953       if (!pCounterBuffers[i])
1954          continue;
1955 
1956       TU_FROM_HANDLE(tu_buffer, buf, pCounterBuffers[i]);
1957 
1958       /* VPC_SO_FLUSH_BASE holds a counter in dwords, but the counter buffer expects bytes */
1959       tu_cs_emit_pkt7(cs, CP_MEM_TO_REG, 3);
1960       tu_cs_emit(cs, CP_MEM_TO_REG_0_REG(REG_A6XX_CP_SCRATCH_REG(0)) |
1961                      CP_MEM_TO_REG_0_SHIFT_BY_2 |
1962                      0x40000 | /* ??? */
1963                      CP_MEM_TO_REG_0_UNK31 |
1964                      CP_MEM_TO_REG_0_CNT(1));
1965       tu_cs_emit_qw(cs, global_iova(cmd, flush_base[idx]));
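      /* Illustrative example (not from the original source): if the flush
       * base records 0x80 dwords, SHIFT_BY_2 loads 0x200 bytes into the
       * scratch register, which is then biased by -offset below when needed
       * before being written back to the counter buffer.
       */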
1966 
1967       if (offset) {
1968          tu_cs_emit_pkt7(cs, CP_REG_RMW, 3);
1969          tu_cs_emit(cs, CP_REG_RMW_0_DST_REG(REG_A6XX_CP_SCRATCH_REG(0)) |
1970                         CP_REG_RMW_0_SRC1_ADD);
1971          tu_cs_emit_qw(cs, 0xffffffff);
1972          tu_cs_emit_qw(cs, -offset);
1973       }
1974 
1975       tu_cs_emit_pkt7(cs, CP_REG_TO_MEM, 3);
1976       tu_cs_emit(cs, CP_REG_TO_MEM_0_REG(REG_A6XX_CP_SCRATCH_REG(0)) |
1977                      CP_REG_TO_MEM_0_CNT(1));
1978       tu_cs_emit_qw(cs, buf->bo->iova + pCounterBufferOffsets[i]);
1979    }
1980 
1981    tu_cond_exec_end(cs);
1982 
1983    cmd->state.xfb_used = true;
1984 }
1985 
1986 void
1987 tu_CmdPushConstants(VkCommandBuffer commandBuffer,
1988                     VkPipelineLayout layout,
1989                     VkShaderStageFlags stageFlags,
1990                     uint32_t offset,
1991                     uint32_t size,
1992                     const void *pValues)
1993 {
1994    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
1995    memcpy((void*) cmd->push_constants + offset, pValues, size);
1996    cmd->state.dirty |= TU_CMD_DIRTY_SHADER_CONSTS;
1997 }
1998 
1999 /* Flush everything which has been made available but we haven't actually
2000  * flushed yet.
2001  */
2002 static void
2003 tu_flush_all_pending(struct tu_cache_state *cache)
2004 {
2005    cache->flush_bits |= cache->pending_flush_bits & TU_CMD_FLAG_ALL_FLUSH;
2006    cache->pending_flush_bits &= ~TU_CMD_FLAG_ALL_FLUSH;
2007 }
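
/* Illustrative note (not from the original source): only bits in
 * TU_CMD_FLAG_ALL_FLUSH are promoted here; pending invalidate and wait bits
 * stay pending until a later access actually requires them.
 */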
2008 
2009 VkResult
2010 tu_EndCommandBuffer(VkCommandBuffer commandBuffer)
2011 {
2012    TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
2013 
2014    /* We currently flush CCU at the end of the command buffer, like
2015     * what the blob does. There's implicit synchronization around every
2016     * vkQueueSubmit, but the kernel only flushes the UCHE, and we don't
2017     * know yet if this command buffer will be the last in the submit so we
2018     * have to defensively flush everything else.
2019     *
2020     * TODO: We could definitely do better than this, since these flushes
2021     * aren't required by Vulkan, but we'd need kernel support to do that.
2022     * Ideally, we'd like the kernel to flush everything afterwards, so that we
2023     * wouldn't have to do any flushes here, and when submitting multiple
2024     * command buffers there wouldn't be any unnecessary flushes in between.
2025     */
2026    if (cmd_buffer->state.pass) {
2027       tu_flush_all_pending(&cmd_buffer->state.renderpass_cache);
2028       tu_emit_cache_flush_renderpass(cmd_buffer, &cmd_buffer->draw_cs);
2029    } else {
2030       tu_flush_all_pending(&cmd_buffer->state.cache);
2031       cmd_buffer->state.cache.flush_bits |=
2032          TU_CMD_FLAG_CCU_FLUSH_COLOR |
2033          TU_CMD_FLAG_CCU_FLUSH_DEPTH;
2034       tu_emit_cache_flush(cmd_buffer, &cmd_buffer->cs);
2035    }
2036 
2037    tu_cs_end(&cmd_buffer->cs);
2038    tu_cs_end(&cmd_buffer->draw_cs);
2039    tu_cs_end(&cmd_buffer->draw_epilogue_cs);
2040 
2041    cmd_buffer->status = TU_CMD_BUFFER_STATUS_EXECUTABLE;
2042 
2043    return cmd_buffer->record_result;
2044 }
2045 
2046 static struct tu_cs
2047 tu_cmd_dynamic_state(struct tu_cmd_buffer *cmd, uint32_t id, uint32_t size)
2048 {
2049    struct tu_cs cs;
2050 
2051    assert(id < ARRAY_SIZE(cmd->state.dynamic_state));
2052    cmd->state.dynamic_state[id] = tu_cs_draw_state(&cmd->sub_cs, &cs, size);
2053 
2054    tu_cs_emit_pkt7(&cmd->draw_cs, CP_SET_DRAW_STATE, 3);
2055    tu_cs_emit_draw_state(&cmd->draw_cs, TU_DRAW_STATE_DYNAMIC + id, cmd->state.dynamic_state[id]);
2056 
2057    return cs;
2058 }
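
/* Illustrative note (not from the original source): this helper both
 * allocates a sub-stream draw state of 'size' dwords and registers it in the
 * TU_DRAW_STATE_DYNAMIC group, so the vkCmdSet* entry points below only have
 * to fill in the returned command stream.
 */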
2059 
2060 void
2061 tu_CmdBindPipeline(VkCommandBuffer commandBuffer,
2062                    VkPipelineBindPoint pipelineBindPoint,
2063                    VkPipeline _pipeline)
2064 {
2065    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2066    TU_FROM_HANDLE(tu_pipeline, pipeline, _pipeline);
2067 
2068    if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE) {
2069       cmd->state.compute_pipeline = pipeline;
2070       tu_cs_emit_state_ib(&cmd->cs, pipeline->program.state);
2071       return;
2072    }
2073 
2074    assert(pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS);
2075 
2076    cmd->state.pipeline = pipeline;
2077    cmd->state.dirty |= TU_CMD_DIRTY_DESC_SETS_LOAD | TU_CMD_DIRTY_SHADER_CONSTS | TU_CMD_DIRTY_LRZ;
2078 
2079    struct tu_cs *cs = &cmd->draw_cs;
2080    uint32_t mask = ~pipeline->dynamic_state_mask & BITFIELD_MASK(TU_DYNAMIC_STATE_COUNT);
2081    uint32_t i;
2082 
2083    tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * (6 + util_bitcount(mask)));
2084    tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PROGRAM, pipeline->program.state);
2085    tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PROGRAM_BINNING, pipeline->program.binning_state);
2086    tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VI, pipeline->vi.state);
2087    tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VI_BINNING, pipeline->vi.binning_state);
2088    tu_cs_emit_draw_state(cs, TU_DRAW_STATE_RAST, pipeline->rast_state);
2089    tu_cs_emit_draw_state(cs, TU_DRAW_STATE_BLEND, pipeline->blend_state);
2090 
2091    for_each_bit(i, mask)
2092       tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DYNAMIC + i, pipeline->dynamic_state[i]);
2093 
2094    /* The vertex_buffers draw state always contains all the currently
2095     * bound vertex buffers. Update its size so that only the VBs actually
2096     * used by the pipeline are emitted.
2097     * Note: there is a HW optimization which means the draw state is not
2098     * completely re-executed when only its size changes.
2099     */
2100    if (cmd->state.vertex_buffers.size != pipeline->num_vbs * 4) {
2101       cmd->state.vertex_buffers.size = pipeline->num_vbs * 4;
2102       cmd->state.dirty |= TU_CMD_DIRTY_VERTEX_BUFFERS;
2103    }
2104 
2105    if ((pipeline->dynamic_state_mask & BIT(TU_DYNAMIC_STATE_VB_STRIDE)) &&
2106        cmd->state.dynamic_state[TU_DYNAMIC_STATE_VB_STRIDE].size != pipeline->num_vbs * 2) {
2107       cmd->state.dynamic_state[TU_DYNAMIC_STATE_VB_STRIDE].size = pipeline->num_vbs * 2;
2108       cmd->state.dirty |= TU_CMD_DIRTY_VB_STRIDE;
2109    }
2110 
2111 #define UPDATE_REG(X, Y) {                                           \
2112    /* note: would be better to have pipeline bits already masked */  \
2113    uint32_t pipeline_bits = pipeline->X & pipeline->X##_mask;        \
2114    if ((cmd->state.X & pipeline->X##_mask) != pipeline_bits) {       \
2115       cmd->state.X &= ~pipeline->X##_mask;                           \
2116       cmd->state.X |= pipeline_bits;                                 \
2117       cmd->state.dirty |= TU_CMD_DIRTY_##Y;                          \
2118    }                                                                 \
2119    if (!(pipeline->dynamic_state_mask & BIT(TU_DYNAMIC_STATE_##Y)))  \
2120       cmd->state.dirty &= ~TU_CMD_DIRTY_##Y;                         \
2121 }
2122 
2123    /* These registers can have bits set from both pipeline and dynamic
2124     * state; this updates the bits set by the pipeline. If the pipeline
2125     * doesn't use a dynamic state for the register, the relevant dirty bit
2126     * is cleared, so that the non-dynamic state isn't overridden by a stale
2127     * dynamic state at the next draw.
2128     */
2129    UPDATE_REG(gras_su_cntl, GRAS_SU_CNTL);
2130    UPDATE_REG(rb_depth_cntl, RB_DEPTH_CNTL);
2131    UPDATE_REG(rb_stencil_cntl, RB_STENCIL_CNTL);
2132 #undef UPDATE_REG
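   /* Illustrative example (not from the original source): a pipeline that
    * treats GRAS_SU_CNTL as fully static clears TU_CMD_DIRTY_GRAS_SU_CNTL
    * here, so a cull mode recorded earlier via vkCmdSetCullModeEXT for a
    * previous pipeline can't be re-emitted over the new pipeline's value.
    */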
2133 
2134    if (pipeline->rb_depth_cntl_disable)
2135       cmd->state.dirty |= TU_CMD_DIRTY_RB_DEPTH_CNTL;
2136 }
2137 
2138 void
2139 tu_CmdSetViewport(VkCommandBuffer commandBuffer,
2140                   uint32_t firstViewport,
2141                   uint32_t viewportCount,
2142                   const VkViewport *pViewports)
2143 {
2144    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2145    struct tu_cs cs;
2146 
2147    memcpy(&cmd->state.viewport[firstViewport], pViewports, viewportCount * sizeof(*pViewports));
2148    cmd->state.max_viewport = MAX2(cmd->state.max_viewport, firstViewport + viewportCount);
2149 
2150    cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_VIEWPORT, 8 + 10 * cmd->state.max_viewport);
2151    tu6_emit_viewport(&cs, cmd->state.viewport, cmd->state.max_viewport);
2152 }
2153 
2154 void
2155 tu_CmdSetScissor(VkCommandBuffer commandBuffer,
2156                  uint32_t firstScissor,
2157                  uint32_t scissorCount,
2158                  const VkRect2D *pScissors)
2159 {
2160    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2161    struct tu_cs cs;
2162 
2163    memcpy(&cmd->state.scissor[firstScissor], pScissors, scissorCount * sizeof(*pScissors));
2164    cmd->state.max_scissor = MAX2(cmd->state.max_scissor, firstScissor + scissorCount);
2165 
2166    cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_SCISSOR, 1 + 2 * cmd->state.max_scissor);
2167    tu6_emit_scissor(&cs, cmd->state.scissor, cmd->state.max_scissor);
2168 }
2169 
2170 void
2171 tu_CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
2172 {
2173    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2174 
2175    cmd->state.gras_su_cntl &= ~A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
2176    cmd->state.gras_su_cntl |= A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(lineWidth / 2.0f);
2177 
2178    cmd->state.dirty |= TU_CMD_DIRTY_GRAS_SU_CNTL;
2179 }
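
/* Illustrative note (not from the original source): the hardware field takes
 * a half-width, so a lineWidth of 3.0 programs
 * A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(1.5f).
 */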
2180 
2181 void
2182 tu_CmdSetDepthBias(VkCommandBuffer commandBuffer,
2183                    float depthBiasConstantFactor,
2184                    float depthBiasClamp,
2185                    float depthBiasSlopeFactor)
2186 {
2187    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2188    struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_DEPTH_BIAS, 4);
2189 
2190    tu6_emit_depth_bias(&cs, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
2191 }
2192 
2193 void
2194 tu_CmdSetBlendConstants(VkCommandBuffer commandBuffer,
2195                         const float blendConstants[4])
2196 {
2197    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2198    struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_BLEND_CONSTANTS, 5);
2199 
2200    tu_cs_emit_pkt4(&cs, REG_A6XX_RB_BLEND_RED_F32, 4);
2201    tu_cs_emit_array(&cs, (const uint32_t *) blendConstants, 4);
2202 }
2203 
2204 void
2205 tu_CmdSetDepthBounds(VkCommandBuffer commandBuffer,
2206                      float minDepthBounds,
2207                      float maxDepthBounds)
2208 {
2209    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2210    struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_DEPTH_BOUNDS, 3);
2211 
2212    tu_cs_emit_regs(&cs,
2213                    A6XX_RB_Z_BOUNDS_MIN(minDepthBounds),
2214                    A6XX_RB_Z_BOUNDS_MAX(maxDepthBounds));
2215 }
2216 
2217 static void
2218 update_stencil_mask(uint32_t *value, VkStencilFaceFlags face, uint32_t mask)
2219 {
2220    if (face & VK_STENCIL_FACE_FRONT_BIT)
2221       *value = (*value & 0xff00) | (mask & 0xff);
2222    if (face & VK_STENCIL_FACE_BACK_BIT)
2223       *value = (*value & 0xff) | (mask & 0xff) << 8;
2224 }
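
/* Worked example (illustrative, not from the original source): with
 * face = VK_STENCIL_FACE_FRONT_AND_BACK and mask = 0x3c the packed value
 * becomes 0x3c3c; with face = VK_STENCIL_FACE_BACK_BIT only, the front byte
 * in bits [7:0] is preserved and only bits [15:8] are replaced.
 */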
2225 
2226 void
2227 tu_CmdSetStencilCompareMask(VkCommandBuffer commandBuffer,
2228                             VkStencilFaceFlags faceMask,
2229                             uint32_t compareMask)
2230 {
2231    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2232    struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK, 2);
2233 
2234    update_stencil_mask(&cmd->state.dynamic_stencil_mask, faceMask, compareMask);
2235 
2236    tu_cs_emit_regs(&cs, A6XX_RB_STENCILMASK(.dword = cmd->state.dynamic_stencil_mask));
2237 }
2238 
2239 void
2240 tu_CmdSetStencilWriteMask(VkCommandBuffer commandBuffer,
2241                           VkStencilFaceFlags faceMask,
2242                           uint32_t writeMask)
2243 {
2244    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2245    struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_STENCIL_WRITE_MASK, 2);
2246 
2247    update_stencil_mask(&cmd->state.dynamic_stencil_wrmask, faceMask, writeMask);
2248 
2249    tu_cs_emit_regs(&cs, A6XX_RB_STENCILWRMASK(.dword = cmd->state.dynamic_stencil_wrmask));
2250 }
2251 
2252 void
2253 tu_CmdSetStencilReference(VkCommandBuffer commandBuffer,
2254                           VkStencilFaceFlags faceMask,
2255                           uint32_t reference)
2256 {
2257    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2258    struct tu_cs cs = tu_cmd_dynamic_state(cmd, VK_DYNAMIC_STATE_STENCIL_REFERENCE, 2);
2259 
2260    update_stencil_mask(&cmd->state.dynamic_stencil_ref, faceMask, reference);
2261 
2262    tu_cs_emit_regs(&cs, A6XX_RB_STENCILREF(.dword = cmd->state.dynamic_stencil_ref));
2263 }
2264 
2265 void
2266 tu_CmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
2267                             const VkSampleLocationsInfoEXT* pSampleLocationsInfo)
2268 {
2269    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2270    struct tu_cs cs = tu_cmd_dynamic_state(cmd, TU_DYNAMIC_STATE_SAMPLE_LOCATIONS, 9);
2271 
2272    assert(pSampleLocationsInfo);
2273 
2274    tu6_emit_sample_locations(&cs, pSampleLocationsInfo);
2275 }
2276 
2277 void
2278 tu_CmdSetCullModeEXT(VkCommandBuffer commandBuffer, VkCullModeFlags cullMode)
2279 {
2280    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2281 
2282    cmd->state.gras_su_cntl &=
2283       ~(A6XX_GRAS_SU_CNTL_CULL_FRONT | A6XX_GRAS_SU_CNTL_CULL_BACK);
2284 
2285    if (cullMode & VK_CULL_MODE_FRONT_BIT)
2286       cmd->state.gras_su_cntl |= A6XX_GRAS_SU_CNTL_CULL_FRONT;
2287    if (cullMode & VK_CULL_MODE_BACK_BIT)
2288       cmd->state.gras_su_cntl |= A6XX_GRAS_SU_CNTL_CULL_BACK;
2289 
2290    cmd->state.dirty |= TU_CMD_DIRTY_GRAS_SU_CNTL;
2291 }
2292 
2293 void
2294 tu_CmdSetFrontFaceEXT(VkCommandBuffer commandBuffer, VkFrontFace frontFace)
2295 {
2296    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2297 
2298    cmd->state.gras_su_cntl &= ~A6XX_GRAS_SU_CNTL_FRONT_CW;
2299 
2300    if (frontFace == VK_FRONT_FACE_CLOCKWISE)
2301       cmd->state.gras_su_cntl |= A6XX_GRAS_SU_CNTL_FRONT_CW;
2302 
2303    cmd->state.dirty |= TU_CMD_DIRTY_GRAS_SU_CNTL;
2304 }
2305 
2306 void
2307 tu_CmdSetPrimitiveTopologyEXT(VkCommandBuffer commandBuffer,
2308                               VkPrimitiveTopology primitiveTopology)
2309 {
2310    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2311 
2312    cmd->state.primtype = tu6_primtype(primitiveTopology);
2313 }
2314 
2315 void
2316 tu_CmdSetViewportWithCountEXT(VkCommandBuffer commandBuffer,
2317                               uint32_t viewportCount,
2318                               const VkViewport* pViewports)
2319 {
2320    tu_CmdSetViewport(commandBuffer, 0, viewportCount, pViewports);
2321 }
2322 
2323 void
2324 tu_CmdSetScissorWithCountEXT(VkCommandBuffer commandBuffer,
2325                              uint32_t scissorCount,
2326                              const VkRect2D* pScissors)
2327 {
2328    tu_CmdSetScissor(commandBuffer, 0, scissorCount, pScissors);
2329 }
2330 
2331 void
2332 tu_CmdSetDepthTestEnableEXT(VkCommandBuffer commandBuffer,
2333                             VkBool32 depthTestEnable)
2334 {
2335    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2336 
2337    cmd->state.rb_depth_cntl &= ~A6XX_RB_DEPTH_CNTL_Z_ENABLE;
2338 
2339    if (depthTestEnable)
2340       cmd->state.rb_depth_cntl |= A6XX_RB_DEPTH_CNTL_Z_ENABLE;
2341 
2342    cmd->state.dirty |= TU_CMD_DIRTY_RB_DEPTH_CNTL;
2343 }
2344 
2345 void
2346 tu_CmdSetDepthWriteEnableEXT(VkCommandBuffer commandBuffer,
2347                              VkBool32 depthWriteEnable)
2348 {
2349    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2350 
2351    cmd->state.rb_depth_cntl &= ~A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE;
2352 
2353    if (depthWriteEnable)
2354       cmd->state.rb_depth_cntl |= A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE;
2355 
2356    cmd->state.dirty |= TU_CMD_DIRTY_RB_DEPTH_CNTL;
2357 }
2358 
2359 void
2360 tu_CmdSetDepthCompareOpEXT(VkCommandBuffer commandBuffer,
2361                            VkCompareOp depthCompareOp)
2362 {
2363    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2364 
2365    cmd->state.rb_depth_cntl &= ~A6XX_RB_DEPTH_CNTL_ZFUNC__MASK;
2366 
2367    cmd->state.rb_depth_cntl |=
2368       A6XX_RB_DEPTH_CNTL_ZFUNC(tu6_compare_func(depthCompareOp));
2369 
2370    cmd->state.dirty |= TU_CMD_DIRTY_RB_DEPTH_CNTL;
2371 }
2372 
2373 void
2374 tu_CmdSetDepthBoundsTestEnableEXT(VkCommandBuffer commandBuffer,
2375                                   VkBool32 depthBoundsTestEnable)
2376 {
2377    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2378 
2379    cmd->state.rb_depth_cntl &= ~A6XX_RB_DEPTH_CNTL_Z_BOUNDS_ENABLE;
2380 
2381    if (depthBoundsTestEnable)
2382       cmd->state.rb_depth_cntl |= A6XX_RB_DEPTH_CNTL_Z_BOUNDS_ENABLE;
2383 
2384    cmd->state.dirty |= TU_CMD_DIRTY_RB_DEPTH_CNTL;
2385 }
2386 
2387 void
2388 tu_CmdSetStencilTestEnableEXT(VkCommandBuffer commandBuffer,
2389                               VkBool32 stencilTestEnable)
2390 {
2391    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2392 
2393    cmd->state.rb_stencil_cntl &= ~(
2394       A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE |
2395       A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF |
2396       A6XX_RB_STENCIL_CONTROL_STENCIL_READ);
2397 
2398    if (stencilTestEnable) {
2399       cmd->state.rb_stencil_cntl |=
2400          A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE |
2401          A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF |
2402          A6XX_RB_STENCIL_CONTROL_STENCIL_READ;
2403    }
2404 
2405    cmd->state.dirty |= TU_CMD_DIRTY_RB_STENCIL_CNTL;
2406 }
2407 
2408 void
2409 tu_CmdSetStencilOpEXT(VkCommandBuffer commandBuffer,
2410                       VkStencilFaceFlags faceMask,
2411                       VkStencilOp failOp,
2412                       VkStencilOp passOp,
2413                       VkStencilOp depthFailOp,
2414                       VkCompareOp compareOp)
2415 {
2416    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2417 
2418    if (faceMask & VK_STENCIL_FACE_FRONT_BIT) {
2419       cmd->state.rb_stencil_cntl &= ~(
2420          A6XX_RB_STENCIL_CONTROL_FUNC__MASK |
2421          A6XX_RB_STENCIL_CONTROL_FAIL__MASK |
2422          A6XX_RB_STENCIL_CONTROL_ZPASS__MASK |
2423          A6XX_RB_STENCIL_CONTROL_ZFAIL__MASK);
2424 
2425       cmd->state.rb_stencil_cntl |=
2426          A6XX_RB_STENCIL_CONTROL_FUNC(tu6_compare_func(compareOp)) |
2427          A6XX_RB_STENCIL_CONTROL_FAIL(tu6_stencil_op(failOp)) |
2428          A6XX_RB_STENCIL_CONTROL_ZPASS(tu6_stencil_op(passOp)) |
2429          A6XX_RB_STENCIL_CONTROL_ZFAIL(tu6_stencil_op(depthFailOp));
2430    }
2431 
2432    if (faceMask & VK_STENCIL_FACE_BACK_BIT) {
2433       cmd->state.rb_stencil_cntl &= ~(
2434          A6XX_RB_STENCIL_CONTROL_FUNC_BF__MASK |
2435          A6XX_RB_STENCIL_CONTROL_FAIL_BF__MASK |
2436          A6XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK |
2437          A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK);
2438 
2439       cmd->state.rb_stencil_cntl |=
2440          A6XX_RB_STENCIL_CONTROL_FUNC_BF(tu6_compare_func(compareOp)) |
2441          A6XX_RB_STENCIL_CONTROL_FAIL_BF(tu6_stencil_op(failOp)) |
2442          A6XX_RB_STENCIL_CONTROL_ZPASS_BF(tu6_stencil_op(passOp)) |
2443          A6XX_RB_STENCIL_CONTROL_ZFAIL_BF(tu6_stencil_op(depthFailOp));
2444    }
2445 
2446    cmd->state.dirty |= TU_CMD_DIRTY_RB_STENCIL_CNTL;
2447 }
2448 
2449 static void
2450 tu_flush_for_access(struct tu_cache_state *cache,
2451                     enum tu_cmd_access_mask src_mask,
2452                     enum tu_cmd_access_mask dst_mask)
2453 {
2454    enum tu_cmd_flush_bits flush_bits = 0;
2455 
2456    if (src_mask & TU_ACCESS_HOST_WRITE) {
2457       /* Host writes are always visible to CP, so only invalidate GPU caches */
2458       cache->pending_flush_bits |= TU_CMD_FLAG_GPU_INVALIDATE;
2459    }
2460 
2461    if (src_mask & TU_ACCESS_SYSMEM_WRITE) {
2462       /* Invalidate CP and 2D engine (make it do WFI + WFM if necessary) as
2463        * well.
2464        */
2465       cache->pending_flush_bits |= TU_CMD_FLAG_ALL_INVALIDATE;
2466    }
2467 
2468    if (src_mask & TU_ACCESS_CP_WRITE) {
2469       /* Flush the CP write queue. However, a WFI shouldn't be necessary, as
2470        * WAIT_MEM_WRITES should cover it.
2471        */
2472       cache->pending_flush_bits |=
2473          TU_CMD_FLAG_WAIT_MEM_WRITES |
2474          TU_CMD_FLAG_GPU_INVALIDATE |
2475          TU_CMD_FLAG_WAIT_FOR_ME;
2476    }
2477 
2478 #define SRC_FLUSH(domain, flush, invalidate) \
2479    if (src_mask & TU_ACCESS_##domain##_WRITE) {                      \
2480       cache->pending_flush_bits |= TU_CMD_FLAG_##flush |             \
2481          (TU_CMD_FLAG_ALL_INVALIDATE & ~TU_CMD_FLAG_##invalidate);   \
2482    }
2483 
2484    SRC_FLUSH(UCHE, CACHE_FLUSH, CACHE_INVALIDATE)
2485    SRC_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
2486    SRC_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)
2487 
2488 #undef SRC_FLUSH
2489 
2490 #define SRC_INCOHERENT_FLUSH(domain, flush, invalidate)              \
2491    if (src_mask & TU_ACCESS_##domain##_INCOHERENT_WRITE) {           \
2492       flush_bits |= TU_CMD_FLAG_##flush;                             \
2493       cache->pending_flush_bits |=                                   \
2494          (TU_CMD_FLAG_ALL_INVALIDATE & ~TU_CMD_FLAG_##invalidate);   \
2495    }
2496 
2497    SRC_INCOHERENT_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
2498    SRC_INCOHERENT_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)
2499 
2500 #undef SRC_INCOHERENT_FLUSH
2501 
2502    /* Treat host & sysmem write accesses the same, since the kernel implicitly
2503     * drains the queue before signalling completion to the host.
2504     */
2505    if (dst_mask & (TU_ACCESS_SYSMEM_READ | TU_ACCESS_SYSMEM_WRITE |
2506                    TU_ACCESS_HOST_READ | TU_ACCESS_HOST_WRITE)) {
2507       flush_bits |= cache->pending_flush_bits & TU_CMD_FLAG_ALL_FLUSH;
2508    }
2509 
2510 #define DST_FLUSH(domain, flush, invalidate) \
2511    if (dst_mask & (TU_ACCESS_##domain##_READ |                 \
2512                    TU_ACCESS_##domain##_WRITE)) {              \
2513       flush_bits |= cache->pending_flush_bits &                \
2514          (TU_CMD_FLAG_##invalidate |                           \
2515           (TU_CMD_FLAG_ALL_FLUSH & ~TU_CMD_FLAG_##flush));     \
2516    }
2517 
2518    DST_FLUSH(UCHE, CACHE_FLUSH, CACHE_INVALIDATE)
2519    DST_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
2520    DST_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)
2521 
2522 #undef DST_FLUSH
2523 
2524 #define DST_INCOHERENT_FLUSH(domain, flush, invalidate) \
2525    if (dst_mask & (TU_ACCESS_##domain##_INCOHERENT_READ |      \
2526                    TU_ACCESS_##domain##_INCOHERENT_WRITE)) {   \
2527       flush_bits |= TU_CMD_FLAG_##invalidate |                 \
2528           (cache->pending_flush_bits &                         \
2529            (TU_CMD_FLAG_ALL_FLUSH & ~TU_CMD_FLAG_##flush));    \
2530    }
2531 
2532    DST_INCOHERENT_FLUSH(CCU_COLOR, CCU_FLUSH_COLOR, CCU_INVALIDATE_COLOR)
2533    DST_INCOHERENT_FLUSH(CCU_DEPTH, CCU_FLUSH_DEPTH, CCU_INVALIDATE_DEPTH)
2534 
2535 #undef DST_INCOHERENT_FLUSH
2536 
2537    if (dst_mask & TU_ACCESS_WFI_READ) {
2538       flush_bits |= cache->pending_flush_bits &
2539          (TU_CMD_FLAG_ALL_FLUSH | TU_CMD_FLAG_WAIT_FOR_IDLE);
2540    }
2541 
2542    if (dst_mask & TU_ACCESS_WFM_READ) {
2543       flush_bits |= cache->pending_flush_bits &
2544          (TU_CMD_FLAG_ALL_FLUSH | TU_CMD_FLAG_WAIT_FOR_ME);
2545    }
2546 
2547    cache->flush_bits |= flush_bits;
2548    cache->pending_flush_bits &= ~flush_bits;
2549 }
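
/* Worked example (illustrative, not from the original source): for a barrier
 * with src = CCU_COLOR_INCOHERENT_WRITE and dst = UCHE_READ, the CCU color
 * flush goes straight into flush_bits, the UCHE invalidate is first made
 * pending and then pulled into flush_bits by the DST_FLUSH(UCHE, ...) case,
 * so the barrier emits CCU_FLUSH_COLOR | CACHE_INVALIDATE while the depth
 * invalidate stays pending for a possible later consumer.
 */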
2550 
2551 static enum tu_cmd_access_mask
2552 vk2tu_access(VkAccessFlags flags, bool gmem)
2553 {
2554    enum tu_cmd_access_mask mask = 0;
2555 
2556    /* If the GPU writes a buffer that is then read by an indirect draw
2557     * command, we theoretically need to emit a WFI to wait for any cache
2558     * flushes, and then a WAIT_FOR_ME to wait on the CP for the WFI to
2559     * complete. Waiting for the WFI to complete is performed as part of the
2560     * draw by the firmware, so we just need to execute the WFI.
2561     *
2562     * Transform feedback counters are read via CP_MEM_TO_REG, which implicitly
2563     * does CP_WAIT_FOR_ME, but we still need a WFI if the GPU writes it.
2564     *
2565     * Currently we read the draw predicate using CP_MEM_TO_MEM, which
2566     * also implicitly does CP_WAIT_FOR_ME. However CP_DRAW_PRED_SET does *not*
2567     * implicitly do CP_WAIT_FOR_ME, it seems to only wait for counters to
2568     * complete since it's written for DX11 where you can only predicate on the
2569     * result of a query object. So if we implement 64-bit comparisons in the
2570     * future, or if CP_DRAW_PRED_SET grows the capability to do 32-bit
2571     * comparisons, then this will have to be dealt with.
2572     */
2573    if (flags &
2574        (VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
2575         VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT |
2576         VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT |
2577         VK_ACCESS_MEMORY_READ_BIT)) {
2578       mask |= TU_ACCESS_WFI_READ;
2579    }
2580 
2581    if (flags &
2582        (VK_ACCESS_INDIRECT_COMMAND_READ_BIT | /* Read performed by CP */
2583         VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT | /* Read performed by CP */
2584         VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT | /* Read performed by CP */
2585         VK_ACCESS_MEMORY_READ_BIT)) {
2586       mask |= TU_ACCESS_SYSMEM_READ;
2587    }
2588 
2589    if (flags &
2590        (VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT |
2591         VK_ACCESS_MEMORY_WRITE_BIT)) {
2592       mask |= TU_ACCESS_CP_WRITE;
2593    }
2594 
2595    if (flags &
2596        (VK_ACCESS_HOST_READ_BIT |
2597         VK_ACCESS_MEMORY_WRITE_BIT)) {
2598       mask |= TU_ACCESS_HOST_READ;
2599    }
2600 
2601    if (flags &
2602        (VK_ACCESS_HOST_WRITE_BIT |
2603         VK_ACCESS_MEMORY_WRITE_BIT)) {
2604       mask |= TU_ACCESS_HOST_WRITE;
2605    }
2606 
2607    if (flags &
2608        (VK_ACCESS_INDEX_READ_BIT | /* Read performed by PC, I think */
2609         VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | /* Read performed by VFD */
2610         VK_ACCESS_UNIFORM_READ_BIT | /* Read performed by SP */
2611         /* TODO: Is there a no-cache bit for textures so that we can ignore
2612          * these?
2613          */
2614         VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | /* Read performed by TP */
2615         VK_ACCESS_SHADER_READ_BIT | /* Read performed by SP/TP */
2616         VK_ACCESS_MEMORY_READ_BIT)) {
2617       mask |= TU_ACCESS_UCHE_READ;
2618    }
2619 
2620    if (flags &
2621        (VK_ACCESS_SHADER_WRITE_BIT | /* Write performed by SP */
2622         VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT | /* Write performed by VPC */
2623         VK_ACCESS_MEMORY_WRITE_BIT)) {
2624       mask |= TU_ACCESS_UCHE_WRITE;
2625    }
2626 
2627    /* When using GMEM, the CCU is always flushed automatically to GMEM, and
2628     * then GMEM is flushed to sysmem. Furthermore, we already had to flush any
2629     * previous writes in sysmem mode when transitioning to GMEM. Therefore we
2630     * can ignore CCU and pretend that color attachments and transfers use
2631     * sysmem directly.
2632     */
2633 
2634    if (flags &
2635        (VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
2636         VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT |
2637         VK_ACCESS_MEMORY_READ_BIT)) {
2638       if (gmem)
2639          mask |= TU_ACCESS_SYSMEM_READ;
2640       else
2641          mask |= TU_ACCESS_CCU_COLOR_INCOHERENT_READ;
2642    }
2643 
2644    if (flags &
2645        (VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
2646         VK_ACCESS_MEMORY_READ_BIT)) {
2647       if (gmem)
2648          mask |= TU_ACCESS_SYSMEM_READ;
2649       else
2650          mask |= TU_ACCESS_CCU_DEPTH_INCOHERENT_READ;
2651    }
2652 
2653    if (flags &
2654        (VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
2655         VK_ACCESS_MEMORY_WRITE_BIT)) {
2656       if (gmem) {
2657          mask |= TU_ACCESS_SYSMEM_WRITE;
2658       } else {
2659          mask |= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE;
2660       }
2661    }
2662 
2663    if (flags &
2664        (VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
2665         VK_ACCESS_MEMORY_WRITE_BIT)) {
2666       if (gmem) {
2667          mask |= TU_ACCESS_SYSMEM_WRITE;
2668       } else {
2669          mask |= TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE;
2670       }
2671    }
2672 
2673    /* When the dst access is a transfer read/write, it seems we sometimes need
2674     * to insert a WFI after any flushes, to guarantee that the flushes finish
2675     * before the 2D engine starts. However the opposite (i.e. a WFI after
2676     * CP_BLIT and before any subsequent flush) does not seem to be needed, and
2677     * the blob doesn't emit such a WFI.
2678     */
2679 
2680    if (flags &
2681        (VK_ACCESS_TRANSFER_WRITE_BIT |
2682         VK_ACCESS_MEMORY_WRITE_BIT)) {
2683       if (gmem) {
2684          mask |= TU_ACCESS_SYSMEM_WRITE;
2685       } else {
2686          mask |= TU_ACCESS_CCU_COLOR_WRITE;
2687       }
2688       mask |= TU_ACCESS_WFI_READ;
2689    }
2690 
2691    if (flags &
2692        (VK_ACCESS_TRANSFER_READ_BIT | /* Access performed by TP */
2693         VK_ACCESS_MEMORY_READ_BIT)) {
2694       mask |= TU_ACCESS_UCHE_READ | TU_ACCESS_WFI_READ;
2695    }
2696 
2697    return mask;
2698 }
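/* Illustrative sketch (not driver code): mapping a typical barrier through
 * vk2tu_access() on the non-gmem (sysmem) path:
 *
 *    enum tu_cmd_access_mask src =
 *       vk2tu_access(VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, false);
 *    // src == TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE
 *    enum tu_cmd_access_mask dst =
 *       vk2tu_access(VK_ACCESS_SHADER_READ_BIT, false);
 *    // dst == TU_ACCESS_UCHE_READ
 *
 * Feeding these to tu_flush_for_access() would then be expected to schedule a
 * CCU color flush plus a UCHE invalidate before the read.
 */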
2699 
2700 
2701 void
2702 tu_CmdExecuteCommands(VkCommandBuffer commandBuffer,
2703                       uint32_t commandBufferCount,
2704                       const VkCommandBuffer *pCmdBuffers)
2705 {
2706    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2707    VkResult result;
2708 
2709    assert(commandBufferCount > 0);
2710 
2711    /* Emit any pending flushes. */
2712    if (cmd->state.pass) {
2713       tu_flush_all_pending(&cmd->state.renderpass_cache);
2714       tu_emit_cache_flush_renderpass(cmd, &cmd->draw_cs);
2715    } else {
2716       tu_flush_all_pending(&cmd->state.cache);
2717       tu_emit_cache_flush(cmd, &cmd->cs);
2718    }
2719 
2720    for (uint32_t i = 0; i < commandBufferCount; i++) {
2721       TU_FROM_HANDLE(tu_cmd_buffer, secondary, pCmdBuffers[i]);
2722 
2723       if (secondary->usage_flags &
2724           VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
2725          assert(tu_cs_is_empty(&secondary->cs));
2726 
2727          result = tu_cs_add_entries(&cmd->draw_cs, &secondary->draw_cs);
2728          if (result != VK_SUCCESS) {
2729             cmd->record_result = result;
2730             break;
2731          }
2732 
2733          result = tu_cs_add_entries(&cmd->draw_epilogue_cs,
2734                &secondary->draw_epilogue_cs);
2735          if (result != VK_SUCCESS) {
2736             cmd->record_result = result;
2737             break;
2738          }
2739 
2740          if (secondary->state.has_tess)
2741             cmd->state.has_tess = true;
2742          if (secondary->state.has_subpass_predication)
2743             cmd->state.has_subpass_predication = true;
2744       } else {
2745          assert(tu_cs_is_empty(&secondary->draw_cs));
2746          assert(tu_cs_is_empty(&secondary->draw_epilogue_cs));
2747 
2748          tu_cs_add_entries(&cmd->cs, &secondary->cs);
2749       }
2750 
2751       cmd->state.index_size = secondary->state.index_size; /* for restart index update */
2752    }
2753    cmd->state.dirty = ~0u; /* TODO: set dirty only what needs to be */
2754 
2755    if (cmd->state.pass) {
2756       /* After a secondary command buffer is executed, LRZ is not valid
2757        * until it is cleared again.
2758        */
2759       cmd->state.lrz.valid = false;
2760    }
2761 
2762    /* After executing secondary command buffers, there may have been arbitrary
2763     * flushes executed, so when we encounter a pipeline barrier with a
2764     * srcMask, we have to assume that we need to invalidate. Therefore we need
2765     * to re-initialize the cache with all pending invalidate bits set.
2766     */
2767    if (cmd->state.pass) {
2768       tu_cache_init(&cmd->state.renderpass_cache);
2769    } else {
2770       tu_cache_init(&cmd->state.cache);
2771    }
2772 }
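/* Usage sketch from the application side (names like rp_begin and secondaries
 * are placeholders, not driver code): inside a render pass, a primary command
 * buffer pulls in pre-recorded secondaries whose draw_cs entries get spliced
 * into cmd->draw_cs above:
 *
 *    vkCmdBeginRenderPass(primary, &rp_begin,
 *                         VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
 *    vkCmdExecuteCommands(primary, 2, secondaries);
 *    vkCmdEndRenderPass(primary);
 */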
2773 
2774 VkResult
2775 tu_CreateCommandPool(VkDevice _device,
2776                      const VkCommandPoolCreateInfo *pCreateInfo,
2777                      const VkAllocationCallbacks *pAllocator,
2778                      VkCommandPool *pCmdPool)
2779 {
2780    TU_FROM_HANDLE(tu_device, device, _device);
2781    struct tu_cmd_pool *pool;
2782 
2783    pool = vk_object_alloc(&device->vk, pAllocator, sizeof(*pool),
2784                           VK_OBJECT_TYPE_COMMAND_POOL);
2785    if (pool == NULL)
2786       return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2787 
2788    if (pAllocator)
2789       pool->alloc = *pAllocator;
2790    else
2791       pool->alloc = device->vk.alloc;
2792 
2793    list_inithead(&pool->cmd_buffers);
2794    list_inithead(&pool->free_cmd_buffers);
2795 
2796    pool->queue_family_index = pCreateInfo->queueFamilyIndex;
2797 
2798    *pCmdPool = tu_cmd_pool_to_handle(pool);
2799 
2800    return VK_SUCCESS;
2801 }
2802 
2803 void
2804 tu_DestroyCommandPool(VkDevice _device,
2805                       VkCommandPool commandPool,
2806                       const VkAllocationCallbacks *pAllocator)
2807 {
2808    TU_FROM_HANDLE(tu_device, device, _device);
2809    TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
2810 
2811    if (!pool)
2812       return;
2813 
2814    list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
2815                             &pool->cmd_buffers, pool_link)
2816    {
2817       tu_cmd_buffer_destroy(cmd_buffer);
2818    }
2819 
2820    list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
2821                             &pool->free_cmd_buffers, pool_link)
2822    {
2823       tu_cmd_buffer_destroy(cmd_buffer);
2824    }
2825 
2826    vk_object_free(&device->vk, pAllocator, pool);
2827 }
2828 
2829 VkResult
2830 tu_ResetCommandPool(VkDevice device,
2831                     VkCommandPool commandPool,
2832                     VkCommandPoolResetFlags flags)
2833 {
2834    TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
2835    VkResult result;
2836 
2837    list_for_each_entry(struct tu_cmd_buffer, cmd_buffer, &pool->cmd_buffers,
2838                        pool_link)
2839    {
2840       result = tu_reset_cmd_buffer(cmd_buffer);
2841       if (result != VK_SUCCESS)
2842          return result;
2843    }
2844 
2845    return VK_SUCCESS;
2846 }
2847 
2848 void
2849 tu_TrimCommandPool(VkDevice device,
2850                    VkCommandPool commandPool,
2851                    VkCommandPoolTrimFlags flags)
2852 {
2853    TU_FROM_HANDLE(tu_cmd_pool, pool, commandPool);
2854 
2855    if (!pool)
2856       return;
2857 
2858    list_for_each_entry_safe(struct tu_cmd_buffer, cmd_buffer,
2859                             &pool->free_cmd_buffers, pool_link)
2860    {
2861       tu_cmd_buffer_destroy(cmd_buffer);
2862    }
2863 }
2864 
2865 static void
2866 tu_subpass_barrier(struct tu_cmd_buffer *cmd_buffer,
2867                    const struct tu_subpass_barrier *barrier,
2868                    bool external)
2869 {
2870    /* Note: we don't know until the end of the subpass whether we'll use
2871     * sysmem, so assume sysmem here to be safe.
2872     */
2873    struct tu_cache_state *cache =
2874       external ? &cmd_buffer->state.cache : &cmd_buffer->state.renderpass_cache;
2875    enum tu_cmd_access_mask src_flags =
2876       vk2tu_access(barrier->src_access_mask, false);
2877    enum tu_cmd_access_mask dst_flags =
2878       vk2tu_access(barrier->dst_access_mask, false);
2879 
2880    if (barrier->incoherent_ccu_color)
2881       src_flags |= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE;
2882    if (barrier->incoherent_ccu_depth)
2883       src_flags |= TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE;
2884 
2885    tu_flush_for_access(cache, src_flags, dst_flags);
2886 }
2887 
2888 void
2889 tu_CmdBeginRenderPass2(VkCommandBuffer commandBuffer,
2890                        const VkRenderPassBeginInfo *pRenderPassBegin,
2891                        const VkSubpassBeginInfo *pSubpassBeginInfo)
2892 {
2893    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2894    TU_FROM_HANDLE(tu_render_pass, pass, pRenderPassBegin->renderPass);
2895    TU_FROM_HANDLE(tu_framebuffer, fb, pRenderPassBegin->framebuffer);
2896 
2897    cmd->state.pass = pass;
2898    cmd->state.subpass = pass->subpasses;
2899    cmd->state.framebuffer = fb;
2900    cmd->state.render_area = pRenderPassBegin->renderArea;
2901 
2902    tu_cmd_prepare_tile_store_ib(cmd);
2903 
2904    /* Note: because this is external, any flushes will happen before draw_cs
2905     * gets called. However, deferred flushes may still have to happen later
2906     * as part of the subpass.
2907     */
2908    tu_subpass_barrier(cmd, &pass->subpasses[0].start_barrier, true);
2909    cmd->state.renderpass_cache.pending_flush_bits =
2910       cmd->state.cache.pending_flush_bits;
2911    cmd->state.renderpass_cache.flush_bits = 0;
2912 
2913    /* Track LRZ valid state */
2914    uint32_t a = cmd->state.subpass->depth_stencil_attachment.attachment;
2915    if (a != VK_ATTACHMENT_UNUSED) {
2916       const struct tu_render_pass_attachment *att = &cmd->state.pass->attachments[a];
2917       struct tu_image *image = fb->attachments[a].attachment->image;
2918       /* if the image has lrz and it isn't a stencil-only clear: */
2919       if (image->lrz_height &&
2920           (att->clear_mask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT))) {
2921          cmd->state.lrz.image = image;
2922          cmd->state.lrz.valid = true;
2923 
2924          tu6_clear_lrz(cmd, &cmd->cs, image, &pRenderPassBegin->pClearValues[a]);
2925          tu6_emit_event_write(cmd, &cmd->cs, PC_CCU_FLUSH_COLOR_TS);
2926       } else {
2927          cmd->state.lrz.valid = false;
2928       }
2929       cmd->state.dirty |= TU_CMD_DIRTY_LRZ;
2930    }
2931 
2932    tu_emit_renderpass_begin(cmd, pRenderPassBegin);
2933 
2934    tu6_emit_zs(cmd, cmd->state.subpass, &cmd->draw_cs);
2935    tu6_emit_mrt(cmd, cmd->state.subpass, &cmd->draw_cs);
2936    tu6_emit_msaa(&cmd->draw_cs, cmd->state.subpass->samples);
2937    tu6_emit_render_cntl(cmd, cmd->state.subpass, &cmd->draw_cs, false);
2938 
2939    tu_set_input_attachments(cmd, cmd->state.subpass);
2940 
2941    cmd->state.dirty |= TU_CMD_DIRTY_DRAW_STATE;
2942 }
2943 
2944 void
2945 tu_CmdNextSubpass2(VkCommandBuffer commandBuffer,
2946                    const VkSubpassBeginInfo *pSubpassBeginInfo,
2947                    const VkSubpassEndInfo *pSubpassEndInfo)
2948 {
2949    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
2950    const struct tu_render_pass *pass = cmd->state.pass;
2951    struct tu_cs *cs = &cmd->draw_cs;
2952 
2953    const struct tu_subpass *subpass = cmd->state.subpass++;
2954 
2955    /* Track LRZ valid state
2956     *
2957     * TODO: Improve this tracking by keeping the state of past depth/stencil
2958     * images, so that if they become active again, we can reuse their old state.
2959     */
2960    cmd->state.lrz.valid = false;
2961    cmd->state.dirty |= TU_CMD_DIRTY_LRZ;
2962 
2963    tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_GMEM);
2964 
2965    if (subpass->resolve_attachments) {
2966       tu6_emit_blit_scissor(cmd, cs, true);
2967 
2968       for (unsigned i = 0; i < subpass->color_count; i++) {
2969          uint32_t a = subpass->resolve_attachments[i].attachment;
2970          if (a == VK_ATTACHMENT_UNUSED)
2971             continue;
2972 
2973          tu_store_gmem_attachment(cmd, cs, a,
2974                                   subpass->color_attachments[i].attachment);
2975 
2976          if (pass->attachments[a].gmem_offset < 0)
2977             continue;
2978 
2979          /* TODO:
2980           * check if the resolved attachment is needed by later subpasses,
2981           * if it is, we should do a GMEM->GMEM resolve instead of GMEM->MEM->GMEM.
2982           */
2983          tu_finishme("missing GMEM->GMEM resolve path\n");
2984          tu_load_gmem_attachment(cmd, cs, a, true);
2985       }
2986    }
2987 
2988    tu_cond_exec_end(cs);
2989 
2990    tu_cond_exec_start(cs, CP_COND_EXEC_0_RENDER_MODE_SYSMEM);
2991 
2992    tu6_emit_sysmem_resolves(cmd, cs, subpass);
2993 
2994    tu_cond_exec_end(cs);
2995 
2996    /* Handle dependencies for the next subpass */
2997    tu_subpass_barrier(cmd, &cmd->state.subpass->start_barrier, false);
2998 
2999    /* emit mrt/zs/msaa/ubwc state for the subpass that is starting */
3000    tu6_emit_zs(cmd, cmd->state.subpass, cs);
3001    tu6_emit_mrt(cmd, cmd->state.subpass, cs);
3002    tu6_emit_msaa(cs, cmd->state.subpass->samples);
3003    tu6_emit_render_cntl(cmd, cmd->state.subpass, cs, false);
3004 
3005    tu_set_input_attachments(cmd, cmd->state.subpass);
3006 }
3007 
3008 static void
3009 tu6_emit_user_consts(struct tu_cs *cs, const struct tu_pipeline *pipeline,
3010                      struct tu_descriptor_state *descriptors_state,
3011                      gl_shader_stage type,
3012                      uint32_t *push_constants)
3013 {
3014    const struct tu_program_descriptor_linkage *link =
3015       &pipeline->program.link[type];
3016    const struct ir3_ubo_analysis_state *state = &link->const_state.ubo_state;
3017 
3018    if (link->push_consts.count > 0) {
3019       unsigned num_units = link->push_consts.count;
3020       unsigned offset = link->push_consts.lo;
3021       tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_units * 4);
3022       tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
3023             CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
3024             CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
3025             CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
3026             CP_LOAD_STATE6_0_NUM_UNIT(num_units));
3027       tu_cs_emit(cs, 0);
3028       tu_cs_emit(cs, 0);
3029       for (unsigned i = 0; i < num_units * 4; i++)
3030          tu_cs_emit(cs, push_constants[i + offset * 4]);
3031    }
3032 
3033    for (uint32_t i = 0; i < state->num_enabled; i++) {
3034       uint32_t size = state->range[i].end - state->range[i].start;
3035       uint32_t offset = state->range[i].start;
3036 
3037       /* and even if the start of the const buffer is before
3038        * first_immediate, the end may not be:
3039        */
3040       size = MIN2(size, (16 * link->constlen) - state->range[i].offset);
3041 
3042       if (size == 0)
3043          continue;
3044 
3045       /* things should be aligned to vec4: */
3046       debug_assert((state->range[i].offset % 16) == 0);
3047       debug_assert((size % 16) == 0);
3048       debug_assert((offset % 16) == 0);
3049 
3050       /* Dig out the descriptor from the descriptor state and read the VA from
3051        * it.
3052        */
3053       assert(state->range[i].ubo.bindless);
3054       uint32_t *base = state->range[i].ubo.bindless_base == MAX_SETS ?
3055          descriptors_state->dynamic_descriptors :
3056          descriptors_state->sets[state->range[i].ubo.bindless_base]->mapped_ptr;
3057       unsigned block = state->range[i].ubo.block;
3058       uint32_t *desc = base + block * A6XX_TEX_CONST_DWORDS;
3059       uint64_t va = desc[0] | ((uint64_t)(desc[1] & A6XX_UBO_1_BASE_HI__MASK) << 32);
3060       assert(va);
3061 
3062       tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3);
3063       tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(state->range[i].offset / 16) |
3064             CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
3065             CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
3066             CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
3067             CP_LOAD_STATE6_0_NUM_UNIT(size / 16));
3068       tu_cs_emit_qw(cs, va + offset);
3069    }
3070 }
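/* Worked example (illustrative): with link->push_consts.count == 2 and
 * link->push_consts.lo == 4, the push-constant packet above has DST_OFF(4)
 * and NUM_UNIT(2) and is followed by 2 * 4 = 8 dwords copied from
 * push_constants[16..23] (offsets are in vec4 units, data in dwords).
 */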
3071 
3072 static struct tu_draw_state
3073 tu6_emit_consts(struct tu_cmd_buffer *cmd,
3074                 const struct tu_pipeline *pipeline,
3075                 struct tu_descriptor_state *descriptors_state,
3076                 gl_shader_stage type)
3077 {
3078    struct tu_cs cs;
3079    tu_cs_begin_sub_stream(&cmd->sub_cs, 512, &cs); /* TODO: maximum size? */
3080 
3081    tu6_emit_user_consts(&cs, pipeline, descriptors_state, type, cmd->push_constants);
3082 
3083    return tu_cs_end_draw_state(&cmd->sub_cs, &cs);
3084 }
3085 
3086 static uint64_t
3087 get_tess_param_bo_size(const struct tu_pipeline *pipeline,
3088                        uint32_t draw_count)
3089 {
3090    /* TODO: For indirect draws, we can't compute the BO size ahead of time.
3091     * Still not sure what to do here, so just allocate a reasonably large
3092     * BO and hope for the best for now. */
3093    if (!draw_count)
3094       draw_count = 2048;
3095 
3096    /* the tess param BO is pipeline->tess.param_stride bytes per patch,
3097     * which includes both the per-vertex outputs and per-patch outputs;
3098     * build_primitive_map in ir3 calculates this stride.
3099     */
3100    uint32_t verts_per_patch = pipeline->ia.primtype - DI_PT_PATCHES0;
3101    uint32_t num_patches = draw_count / verts_per_patch;
3102    return num_patches * pipeline->tess.param_stride;
3103 }
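/* Worked example (illustrative): a direct draw of 300 vertices with
 * ia.primtype == DI_PT_PATCHES0 + 3 gives verts_per_patch == 3 and
 * num_patches == 100, so the param BO needs 100 * tess.param_stride bytes.
 */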
3104 
3105 static uint64_t
3106 get_tess_factor_bo_size(const struct tu_pipeline *pipeline,
3107                         uint32_t draw_count)
3108 {
3109    /* TODO: For indirect draws, we can't compute the BO size ahead of time.
3110     * Still not sure what to do here, so just allocate a reasonably large
3111     * BO and hope for the best for now. */
3112    if (!draw_count)
3113       draw_count = 2048;
3114 
3115    /* Each distinct patch gets its own tess factor output. */
3116    uint32_t verts_per_patch = pipeline->ia.primtype - DI_PT_PATCHES0;
3117    uint32_t num_patches = draw_count / verts_per_patch;
3118    uint32_t factor_stride;
3119    switch (pipeline->tess.patch_type) {
3120    case IR3_TESS_ISOLINES:
3121       factor_stride = 12;
3122       break;
3123    case IR3_TESS_TRIANGLES:
3124       factor_stride = 20;
3125       break;
3126    case IR3_TESS_QUADS:
3127       factor_stride = 28;
3128       break;
3129    default:
3130       unreachable("bad tessmode");
3131    }
3132    return factor_stride * num_patches;
3133 }
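/* Worked example (illustrative): for IR3_TESS_TRIANGLES the factor_stride is
 * 20 bytes, so the same 300-vertex draw with 3 vertices per patch
 * (num_patches == 100) needs 100 * 20 = 2000 bytes of tess factor space.
 */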
3134 
3135 static VkResult
3136 tu6_emit_tess_consts(struct tu_cmd_buffer *cmd,
3137                      uint32_t draw_count,
3138                      const struct tu_pipeline *pipeline,
3139                      struct tu_draw_state *state,
3140                      uint64_t *factor_iova)
3141 {
3142    struct tu_cs cs;
3143    VkResult result = tu_cs_begin_sub_stream(&cmd->sub_cs, 16, &cs);
3144    if (result != VK_SUCCESS)
3145       return result;
3146 
3147    uint64_t tess_factor_size = get_tess_factor_bo_size(pipeline, draw_count);
3148    uint64_t tess_param_size = get_tess_param_bo_size(pipeline, draw_count);
3149    uint64_t tess_bo_size = tess_factor_size + tess_param_size;
3150    if (tess_bo_size > 0) {
3151       struct tu_bo *tess_bo;
3152       result = tu_get_scratch_bo(cmd->device, tess_bo_size, &tess_bo);
3153       if (result != VK_SUCCESS)
3154          return result;
3155 
3156       uint64_t tess_factor_iova = tess_bo->iova;
3157       uint64_t tess_param_iova = tess_factor_iova + tess_factor_size;
3158 
3159       tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
3160       tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(pipeline->tess.hs_bo_regid) |
3161             CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
3162             CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
3163             CP_LOAD_STATE6_0_STATE_BLOCK(SB6_HS_SHADER) |
3164             CP_LOAD_STATE6_0_NUM_UNIT(1));
3165       tu_cs_emit(&cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
3166       tu_cs_emit(&cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
3167       tu_cs_emit_qw(&cs, tess_param_iova);
3168       tu_cs_emit_qw(&cs, tess_factor_iova);
3169 
3170       tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
3171       tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(pipeline->tess.ds_bo_regid) |
3172             CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
3173             CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
3174             CP_LOAD_STATE6_0_STATE_BLOCK(SB6_DS_SHADER) |
3175             CP_LOAD_STATE6_0_NUM_UNIT(1));
3176       tu_cs_emit(&cs, CP_LOAD_STATE6_1_EXT_SRC_ADDR(0));
3177       tu_cs_emit(&cs, CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(0));
3178       tu_cs_emit_qw(&cs, tess_param_iova);
3179       tu_cs_emit_qw(&cs, tess_factor_iova);
3180 
3181       *factor_iova = tess_factor_iova;
3182    }
3183    *state = tu_cs_end_draw_state(&cmd->sub_cs, &cs);
3184    return VK_SUCCESS;
3185 }
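/* Note on the packets above (descriptive): each CP_LOAD_STATE6_GEOM carries 3
 * setup dwords plus NUM_UNIT(1) * 4 data dwords, i.e. a single vec4 constant
 * holding the two 64-bit addresses { tess_param_iova, tess_factor_iova },
 * written once to the HS const file (hs_bo_regid) and once to the DS const
 * file (ds_bo_regid).
 */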
3186 
3187 static struct tu_draw_state
3188 tu6_build_lrz(struct tu_cmd_buffer *cmd)
3189 {
3190    const uint32_t a = cmd->state.subpass->depth_stencil_attachment.attachment;
3191    struct tu_cs lrz_cs;
3192    struct tu_draw_state ds = tu_cs_draw_state(&cmd->sub_cs, &lrz_cs, 4);
3193 
3194    if (cmd->state.pipeline->lrz.invalidate) {
3195       /* LRZ is not valid for next draw commands, so don't use it until cleared */
3196       cmd->state.lrz.valid = false;
3197    }
3198 
3199    if (a == VK_ATTACHMENT_UNUSED || !cmd->state.lrz.valid) {
3200       tu_cs_emit_regs(&lrz_cs, A6XX_GRAS_LRZ_CNTL(0));
3201       tu_cs_emit_regs(&lrz_cs, A6XX_RB_LRZ_CNTL(0));
3202       return ds;
3203    }
3204 
3205    /* Disable LRZ writes when blending is enabled, since the resulting
3206     * pixel value of a blended draw depends on an earlier draw, which
3207     * LRZ in the draw pass could early-reject if the previous
3208     * blend-enabled draw wrote LRZ.
3209     *
3210     * TODO: We need to disable LRZ writes only for the binning pass.
3211     * Therefore, we need to emit it in a separate draw state. We keep
3212     * it disabled for sysmem path as well for the moment.
3213     */
3214    bool lrz_write = cmd->state.pipeline->lrz.write;
3215    if (cmd->state.pipeline->lrz.blend_disable_write)
3216       lrz_write = false;
3217 
3218    tu_cs_emit_regs(&lrz_cs, A6XX_GRAS_LRZ_CNTL(
3219       .enable = cmd->state.pipeline->lrz.enable,
3220       .greater = cmd->state.pipeline->lrz.greater,
3221       .lrz_write = lrz_write,
3222       .z_test_enable = cmd->state.pipeline->lrz.z_test_enable,
3223    ));
3224 
3225    tu_cs_emit_regs(&lrz_cs, A6XX_RB_LRZ_CNTL(.enable = cmd->state.pipeline->lrz.enable));
3226    return ds;
3227 }
3228 
3229 static VkResult
3230 tu6_draw_common(struct tu_cmd_buffer *cmd,
3231                 struct tu_cs *cs,
3232                 bool indexed,
3233                 /* note: draw_count is 0 for indirect */
3234                 uint32_t draw_count)
3235 {
3236    const struct tu_pipeline *pipeline = cmd->state.pipeline;
3237    VkResult result;
3238 
3239    struct tu_descriptor_state *descriptors_state =
3240       &cmd->descriptors[VK_PIPELINE_BIND_POINT_GRAPHICS];
3241 
3242    tu_emit_cache_flush_renderpass(cmd, cs);
3243 
3244    if (cmd->state.dirty & TU_CMD_DIRTY_LRZ)
3245       cmd->state.lrz.state = tu6_build_lrz(cmd);
3246 
3247    tu_cs_emit_regs(cs, A6XX_PC_PRIMITIVE_CNTL_0(
3248          .primitive_restart =
3249                pipeline->ia.primitive_restart && indexed,
3250          .tess_upper_left_domain_origin =
3251                pipeline->tess.upper_left_domain_origin));
3252 
3253    if (cmd->state.dirty & TU_CMD_DIRTY_GRAS_SU_CNTL) {
3254       struct tu_cs cs = tu_cmd_dynamic_state(cmd, TU_DYNAMIC_STATE_GRAS_SU_CNTL, 2);
3255       tu_cs_emit_regs(&cs, A6XX_GRAS_SU_CNTL(.dword = cmd->state.gras_su_cntl));
3256    }
3257 
3258    if (cmd->state.dirty & TU_CMD_DIRTY_RB_DEPTH_CNTL) {
3259       struct tu_cs cs = tu_cmd_dynamic_state(cmd, TU_DYNAMIC_STATE_RB_DEPTH_CNTL, 2);
3260       uint32_t rb_depth_cntl = cmd->state.rb_depth_cntl;
3261 
3262       if ((rb_depth_cntl & A6XX_RB_DEPTH_CNTL_Z_ENABLE) ||
3263           (rb_depth_cntl & A6XX_RB_DEPTH_CNTL_Z_BOUNDS_ENABLE))
3264          rb_depth_cntl |= A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE;
3265 
3266       if (pipeline->rb_depth_cntl_disable)
3267          rb_depth_cntl = 0;
3268 
3269       tu_cs_emit_regs(&cs, A6XX_RB_DEPTH_CNTL(.dword = rb_depth_cntl));
3270    }
3271 
3272    if (cmd->state.dirty & TU_CMD_DIRTY_RB_STENCIL_CNTL) {
3273       struct tu_cs cs = tu_cmd_dynamic_state(cmd, TU_DYNAMIC_STATE_RB_STENCIL_CNTL, 2);
3274       tu_cs_emit_regs(&cs, A6XX_RB_STENCIL_CONTROL(.dword = cmd->state.rb_stencil_cntl));
3275    }
3276 
3277    if (cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) {
3278       cmd->state.shader_const[MESA_SHADER_VERTEX] =
3279          tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_VERTEX);
3280       cmd->state.shader_const[MESA_SHADER_TESS_CTRL] =
3281          tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_TESS_CTRL);
3282       cmd->state.shader_const[MESA_SHADER_TESS_EVAL] =
3283          tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_TESS_EVAL);
3284       cmd->state.shader_const[MESA_SHADER_GEOMETRY] =
3285          tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_GEOMETRY);
3286       cmd->state.shader_const[MESA_SHADER_FRAGMENT] =
3287          tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_FRAGMENT);
3288    }
3289 
3290    bool has_tess =
3291          pipeline->active_stages & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
3292    struct tu_draw_state tess_consts = {};
3293    if (has_tess) {
3294       uint64_t tess_factor_iova = 0;
3295 
3296       cmd->state.has_tess = true;
3297       result = tu6_emit_tess_consts(cmd, draw_count, pipeline, &tess_consts, &tess_factor_iova);
3298       if (result != VK_SUCCESS)
3299          return result;
3300 
3301       /* this sequence matches what the blob does before every tess draw;
3302        * PC_TESSFACTOR_ADDR_LO is a non-context register and needs a WFI
3303        * before writing to it.
3304        */
3305       tu_cs_emit_wfi(cs);
3306 
3307       tu_cs_emit_pkt4(cs, REG_A6XX_PC_TESSFACTOR_ADDR_LO, 2);
3308       tu_cs_emit_qw(cs, tess_factor_iova);
3309 
3310       tu_cs_emit_pkt7(cs, CP_SET_SUBDRAW_SIZE, 1);
3311       tu_cs_emit(cs, draw_count);
3312    }
3313 
3314    /* for the first draw in a renderpass, re-emit all the draw states
3315     *
3316     * and if a draw-state disabling path (CmdClearAttachments 3D fallback) was
3317     * used, then draw states must be re-emitted. note however this only happens
3318     * in the sysmem path, so this can be skipped for the gmem path (TODO)
3319     *
3320     * the two input attachment states are excluded because secondary command
3321     * buffer doesn't have a state ib to restore it, and not re-emitting them
3322     * is OK since CmdClearAttachments won't disable/overwrite them
3323     */
3324    if (cmd->state.dirty & TU_CMD_DIRTY_DRAW_STATE) {
3325       tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * (TU_DRAW_STATE_COUNT - 2));
3326 
3327       tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PROGRAM, pipeline->program.state);
3328       tu_cs_emit_draw_state(cs, TU_DRAW_STATE_PROGRAM_BINNING, pipeline->program.binning_state);
3329       tu_cs_emit_draw_state(cs, TU_DRAW_STATE_TESS, tess_consts);
3330       tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VI, pipeline->vi.state);
3331       tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VI_BINNING, pipeline->vi.binning_state);
3332       tu_cs_emit_draw_state(cs, TU_DRAW_STATE_RAST, pipeline->rast_state);
3333       tu_cs_emit_draw_state(cs, TU_DRAW_STATE_BLEND, pipeline->blend_state);
3334       tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_CONST, cmd->state.shader_const[MESA_SHADER_VERTEX]);
3335       tu_cs_emit_draw_state(cs, TU_DRAW_STATE_HS_CONST, cmd->state.shader_const[MESA_SHADER_TESS_CTRL]);
3336       tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DS_CONST, cmd->state.shader_const[MESA_SHADER_TESS_EVAL]);
3337       tu_cs_emit_draw_state(cs, TU_DRAW_STATE_GS_CONST, cmd->state.shader_const[MESA_SHADER_GEOMETRY]);
3338       tu_cs_emit_draw_state(cs, TU_DRAW_STATE_FS_CONST, cmd->state.shader_const[MESA_SHADER_FRAGMENT]);
3339       tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DESC_SETS, cmd->state.desc_sets);
3340       tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DESC_SETS_LOAD, pipeline->load_state);
3341       tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VB, cmd->state.vertex_buffers);
3342       tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_PARAMS, cmd->state.vs_params);
3343       tu_cs_emit_draw_state(cs, TU_DRAW_STATE_LRZ, cmd->state.lrz.state);
3344 
3345       for (uint32_t i = 0; i < ARRAY_SIZE(cmd->state.dynamic_state); i++) {
3346          tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DYNAMIC + i,
3347                                ((pipeline->dynamic_state_mask & BIT(i)) ?
3348                                 cmd->state.dynamic_state[i] :
3349                                 pipeline->dynamic_state[i]));
3350       }
3351    } else {
3352       /* emit draw states that were just updated
3353        * note we eventually don't want to have to emit anything here
3354        */
3355       bool emit_binding_stride = false;
3356       uint32_t draw_state_count =
3357          has_tess +
3358          ((cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) ? 5 : 0) +
3359          ((cmd->state.dirty & TU_CMD_DIRTY_DESC_SETS_LOAD) ? 1 : 0) +
3360          ((cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS) ? 1 : 0) +
3361          ((cmd->state.dirty & TU_CMD_DIRTY_LRZ) ? 1 : 0) +
3362          1; /* vs_params */
3363 
3364       if ((cmd->state.dirty & TU_CMD_DIRTY_VB_STRIDE) &&
3365           !(pipeline->dynamic_state_mask & BIT(TU_DYNAMIC_STATE_VB_STRIDE))) {
3366          emit_binding_stride = true;
3367          draw_state_count += 1;
3368       }
3369 
3370       tu_cs_emit_pkt7(cs, CP_SET_DRAW_STATE, 3 * draw_state_count);
3371 
3372       /* We may need to re-emit tess consts if the current draw call is
3373        * sufficiently larger than the last draw call. */
3374       if (has_tess)
3375          tu_cs_emit_draw_state(cs, TU_DRAW_STATE_TESS, tess_consts);
3376       if (cmd->state.dirty & TU_CMD_DIRTY_SHADER_CONSTS) {
3377          tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_CONST, cmd->state.shader_const[MESA_SHADER_VERTEX]);
3378          tu_cs_emit_draw_state(cs, TU_DRAW_STATE_HS_CONST, cmd->state.shader_const[MESA_SHADER_TESS_CTRL]);
3379          tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DS_CONST, cmd->state.shader_const[MESA_SHADER_TESS_EVAL]);
3380          tu_cs_emit_draw_state(cs, TU_DRAW_STATE_GS_CONST, cmd->state.shader_const[MESA_SHADER_GEOMETRY]);
3381          tu_cs_emit_draw_state(cs, TU_DRAW_STATE_FS_CONST, cmd->state.shader_const[MESA_SHADER_FRAGMENT]);
3382       }
3383       if (cmd->state.dirty & TU_CMD_DIRTY_DESC_SETS_LOAD)
3384          tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DESC_SETS_LOAD, pipeline->load_state);
3385       if (cmd->state.dirty & TU_CMD_DIRTY_VERTEX_BUFFERS)
3386          tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VB, cmd->state.vertex_buffers);
3387       if (emit_binding_stride) {
3388          tu_cs_emit_draw_state(cs, TU_DRAW_STATE_DYNAMIC + TU_DYNAMIC_STATE_VB_STRIDE,
3389                                cmd->state.dynamic_state[TU_DYNAMIC_STATE_VB_STRIDE]);
3390       }
3391       tu_cs_emit_draw_state(cs, TU_DRAW_STATE_VS_PARAMS, cmd->state.vs_params);
3392 
3393       if (cmd->state.dirty & TU_CMD_DIRTY_LRZ)
3394          tu_cs_emit_draw_state(cs, TU_DRAW_STATE_LRZ, cmd->state.lrz.state);
3395    }
3396 
3397    tu_cs_sanity_check(cs);
3398 
3399    /* There are too many graphics dirty bits to list here, so just list the
3400     * bits to preserve instead. The only state not emitted here is
3401     * compute-related state.
3402     */
3403    cmd->state.dirty &= TU_CMD_DIRTY_COMPUTE_DESC_SETS_LOAD;
3404    return VK_SUCCESS;
3405 }
3406 
3407 static uint32_t
3408 tu_draw_initiator(struct tu_cmd_buffer *cmd, enum pc_di_src_sel src_sel)
3409 {
3410    const struct tu_pipeline *pipeline = cmd->state.pipeline;
3411    enum pc_di_primtype primtype = pipeline->ia.primtype;
3412 
3413    if (pipeline->dynamic_state_mask & BIT(TU_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY))
3414       primtype = cmd->state.primtype;
3415 
3416    uint32_t initiator =
3417       CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(primtype) |
3418       CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(src_sel) |
3419       CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(cmd->state.index_size) |
3420       CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY);
3421 
3422    if (pipeline->active_stages & VK_SHADER_STAGE_GEOMETRY_BIT)
3423       initiator |= CP_DRAW_INDX_OFFSET_0_GS_ENABLE;
3424 
3425    switch (pipeline->tess.patch_type) {
3426    case IR3_TESS_TRIANGLES:
3427       initiator |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_TRIANGLES) |
3428                    CP_DRAW_INDX_OFFSET_0_TESS_ENABLE;
3429       break;
3430    case IR3_TESS_ISOLINES:
3431       initiator |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_ISOLINES) |
3432                    CP_DRAW_INDX_OFFSET_0_TESS_ENABLE;
3433       break;
3434    case IR3_TESS_NONE:
3435       initiator |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_QUADS);
3436       break;
3437    case IR3_TESS_QUADS:
3438       initiator |= CP_DRAW_INDX_OFFSET_0_PATCH_TYPE(TESS_QUADS) |
3439                    CP_DRAW_INDX_OFFSET_0_TESS_ENABLE;
3440       break;
3441    }
3442    return initiator;
3443 }
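/* Illustrative example: a plain indexed triangle-list draw with no GS and no
 * tessellation would produce roughly
 *
 *    CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(DI_PT_TRILIST) |
 *    CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
 *    CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(cmd->state.index_size) |
 *    CP_DRAW_INDX_OFFSET_0_VIS_CULL(USE_VISIBILITY)
 *
 * plus PATCH_TYPE(TESS_QUADS) from the IR3_TESS_NONE case, with TESS_ENABLE
 * left clear.
 */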
3444 
3445 
3446 static uint32_t
3447 vs_params_offset(struct tu_cmd_buffer *cmd)
3448 {
3449    const struct tu_program_descriptor_linkage *link =
3450       &cmd->state.pipeline->program.link[MESA_SHADER_VERTEX];
3451    const struct ir3_const_state *const_state = &link->const_state;
3452 
3453    if (const_state->offsets.driver_param >= link->constlen)
3454       return 0;
3455 
3456    /* this layout is required by CP_DRAW_INDIRECT_MULTI */
3457    STATIC_ASSERT(IR3_DP_DRAWID == 0);
3458    STATIC_ASSERT(IR3_DP_VTXID_BASE == 1);
3459    STATIC_ASSERT(IR3_DP_INSTID_BASE == 2);
3460 
3461    /* 0 means disabled for CP_DRAW_INDIRECT_MULTI */
3462    assert(const_state->offsets.driver_param != 0);
3463 
3464    return const_state->offsets.driver_param;
3465 }
3466 
3467 static struct tu_draw_state
3468 tu6_emit_vs_params(struct tu_cmd_buffer *cmd,
3469                    uint32_t vertex_offset,
3470                    uint32_t first_instance)
3471 {
3472    uint32_t offset = vs_params_offset(cmd);
3473 
3474    struct tu_cs cs;
3475    VkResult result = tu_cs_begin_sub_stream(&cmd->sub_cs, 3 + (offset ? 8 : 0), &cs);
3476    if (result != VK_SUCCESS) {
3477       cmd->record_result = result;
3478       return (struct tu_draw_state) {};
3479    }
3480 
3481    /* TODO: don't make a new draw state when it doesn't change */
3482 
3483    tu_cs_emit_regs(&cs,
3484                    A6XX_VFD_INDEX_OFFSET(vertex_offset),
3485                    A6XX_VFD_INSTANCE_START_OFFSET(first_instance));
3486 
3487    if (offset) {
3488       tu_cs_emit_pkt7(&cs, CP_LOAD_STATE6_GEOM, 3 + 4);
3489       tu_cs_emit(&cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
3490             CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
3491             CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
3492             CP_LOAD_STATE6_0_STATE_BLOCK(SB6_VS_SHADER) |
3493             CP_LOAD_STATE6_0_NUM_UNIT(1));
3494       tu_cs_emit(&cs, 0);
3495       tu_cs_emit(&cs, 0);
3496 
3497       tu_cs_emit(&cs, 0);
3498       tu_cs_emit(&cs, vertex_offset);
3499       tu_cs_emit(&cs, first_instance);
3500       tu_cs_emit(&cs, 0);
3501    }
3502 
3503    struct tu_cs_entry entry = tu_cs_end_sub_stream(&cmd->sub_cs, &cs);
3504    return (struct tu_draw_state) {entry.bo->iova + entry.offset, entry.size / 4};
3505 }
3506 
3507 void
3508 tu_CmdDraw(VkCommandBuffer commandBuffer,
3509            uint32_t vertexCount,
3510            uint32_t instanceCount,
3511            uint32_t firstVertex,
3512            uint32_t firstInstance)
3513 {
3514    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
3515    struct tu_cs *cs = &cmd->draw_cs;
3516 
3517    cmd->state.vs_params = tu6_emit_vs_params(cmd, firstVertex, firstInstance);
3518 
3519    tu6_draw_common(cmd, cs, false, vertexCount);
3520 
3521    tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 3);
3522    tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_AUTO_INDEX));
3523    tu_cs_emit(cs, instanceCount);
3524    tu_cs_emit(cs, vertexCount);
3525 }
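/* Illustrative example: vkCmdDraw(cb, 3, 1, 0, 0) becomes a CP_DRAW_INDX_OFFSET
 * packet with three payload dwords: the draw initiator, instanceCount == 1 and
 * vertexCount == 3. firstVertex/firstInstance travel through the vs_params
 * draw state emitted above rather than through the packet itself.
 */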
3526 
3527 void
3528 tu_CmdDrawIndexed(VkCommandBuffer commandBuffer,
3529                   uint32_t indexCount,
3530                   uint32_t instanceCount,
3531                   uint32_t firstIndex,
3532                   int32_t vertexOffset,
3533                   uint32_t firstInstance)
3534 {
3535    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
3536    struct tu_cs *cs = &cmd->draw_cs;
3537 
3538    cmd->state.vs_params = tu6_emit_vs_params(cmd, vertexOffset, firstInstance);
3539 
3540    tu6_draw_common(cmd, cs, true, indexCount);
3541 
3542    tu_cs_emit_pkt7(cs, CP_DRAW_INDX_OFFSET, 7);
3543    tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_DMA));
3544    tu_cs_emit(cs, instanceCount);
3545    tu_cs_emit(cs, indexCount);
3546    tu_cs_emit(cs, firstIndex);
3547    tu_cs_emit_qw(cs, cmd->state.index_va);
3548    tu_cs_emit(cs, cmd->state.max_index_count);
3549 }
3550 
3551 /* Various firmware bugs/inconsistencies mean that some indirect draw opcodes
3552  * do not wait for WFI's to complete before executing. Add a WAIT_FOR_ME if one
3553  * is pending for these opcodes. This may result in a few extra WAIT_FOR_ME's
3554  * with these opcodes, but the alternative would add unnecessary WAIT_FOR_ME's
3555  * before draw opcodes that don't need it.
3556  */
3557 static void
3558 draw_wfm(struct tu_cmd_buffer *cmd)
3559 {
3560    cmd->state.renderpass_cache.flush_bits |=
3561       cmd->state.renderpass_cache.pending_flush_bits & TU_CMD_FLAG_WAIT_FOR_ME;
3562    cmd->state.renderpass_cache.pending_flush_bits &= ~TU_CMD_FLAG_WAIT_FOR_ME;
3563 }
3564 
3565 void
3566 tu_CmdDrawIndirect(VkCommandBuffer commandBuffer,
3567                    VkBuffer _buffer,
3568                    VkDeviceSize offset,
3569                    uint32_t drawCount,
3570                    uint32_t stride)
3571 {
3572    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
3573    TU_FROM_HANDLE(tu_buffer, buf, _buffer);
3574    struct tu_cs *cs = &cmd->draw_cs;
3575 
3576    cmd->state.vs_params = (struct tu_draw_state) {};
3577 
3578    /* The latest known a630_sqe.fw fails to wait for WFI before reading the
3579     * indirect buffer when using CP_DRAW_INDIRECT_MULTI, so we have to fall
3580     * back to CP_WAIT_FOR_ME except for a650 which has a fixed firmware.
3581     *
3582     * TODO: There may be newer a630_sqe.fw released in the future which fixes
3583     * this, if so we should detect it and avoid this workaround.
3584     */
3585    if (cmd->device->physical_device->gpu_id != 650)
3586       draw_wfm(cmd);
3587 
3588    tu6_draw_common(cmd, cs, false, 0);
3589 
3590    tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT_MULTI, 6);
3591    tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_AUTO_INDEX));
3592    tu_cs_emit(cs, A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_NORMAL) |
3593                   A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(vs_params_offset(cmd)));
3594    tu_cs_emit(cs, drawCount);
3595    tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + offset);
3596    tu_cs_emit(cs, stride);
3597 }
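/* Descriptive note: each record the CP reads from the indirect buffer is a
 * VkDrawIndirectCommand, i.e. { vertexCount, instanceCount, firstVertex,
 * firstInstance } as four consecutive uint32_t, with consecutive records
 * 'stride' bytes apart. DST_OFF points the CP at the VS const slot where the
 * draw id / vertex base / instance base get written (see vs_params_offset()).
 */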
3598 
3599 void
3600 tu_CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer,
3601                           VkBuffer _buffer,
3602                           VkDeviceSize offset,
3603                           uint32_t drawCount,
3604                           uint32_t stride)
3605 {
3606    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
3607    TU_FROM_HANDLE(tu_buffer, buf, _buffer);
3608    struct tu_cs *cs = &cmd->draw_cs;
3609 
3610    cmd->state.vs_params = (struct tu_draw_state) {};
3611 
3612    if (cmd->device->physical_device->gpu_id != 650)
3613       draw_wfm(cmd);
3614 
3615    tu6_draw_common(cmd, cs, true, 0);
3616 
3617    tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT_MULTI, 9);
3618    tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_DMA));
3619    tu_cs_emit(cs, A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_INDEXED) |
3620                   A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(vs_params_offset(cmd)));
3621    tu_cs_emit(cs, drawCount);
3622    tu_cs_emit_qw(cs, cmd->state.index_va);
3623    tu_cs_emit(cs, cmd->state.max_index_count);
3624    tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + offset);
3625    tu_cs_emit(cs, stride);
3626 }
3627 
3628 void
3629 tu_CmdDrawIndirectCount(VkCommandBuffer commandBuffer,
3630                         VkBuffer _buffer,
3631                         VkDeviceSize offset,
3632                         VkBuffer countBuffer,
3633                         VkDeviceSize countBufferOffset,
3634                         uint32_t drawCount,
3635                         uint32_t stride)
3636 {
3637    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
3638    TU_FROM_HANDLE(tu_buffer, buf, _buffer);
3639    TU_FROM_HANDLE(tu_buffer, count_buf, countBuffer);
3640    struct tu_cs *cs = &cmd->draw_cs;
3641 
3642    cmd->state.vs_params = (struct tu_draw_state) {};
3643 
3644    /* It turns out that the firmware we have for a650 only partially fixed the
3645     * problem with CP_DRAW_INDIRECT_MULTI not waiting for WFI's to complete
3646     * before reading indirect parameters. It waits for WFI's before reading
3647     * the draw parameters, but after reading the indirect count :(.
3648     */
3649    draw_wfm(cmd);
3650 
3651    tu6_draw_common(cmd, cs, false, 0);
3652 
3653    tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT_MULTI, 8);
3654    tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_AUTO_INDEX));
3655    tu_cs_emit(cs, A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_INDIRECT_COUNT) |
3656                   A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(vs_params_offset(cmd)));
3657    tu_cs_emit(cs, drawCount);
3658    tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + offset);
3659    tu_cs_emit_qw(cs, count_buf->bo->iova + count_buf->bo_offset + countBufferOffset);
3660    tu_cs_emit(cs, stride);
3661 }
3662 
3663 void
3664 tu_CmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer,
3665                                VkBuffer _buffer,
3666                                VkDeviceSize offset,
3667                                VkBuffer countBuffer,
3668                                VkDeviceSize countBufferOffset,
3669                                uint32_t drawCount,
3670                                uint32_t stride)
3671 {
3672    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
3673    TU_FROM_HANDLE(tu_buffer, buf, _buffer);
3674    TU_FROM_HANDLE(tu_buffer, count_buf, countBuffer);
3675    struct tu_cs *cs = &cmd->draw_cs;
3676 
3677    cmd->state.vs_params = (struct tu_draw_state) {};
3678 
3679    draw_wfm(cmd);
3680 
3681    tu6_draw_common(cmd, cs, true, 0);
3682 
3683    tu_cs_emit_pkt7(cs, CP_DRAW_INDIRECT_MULTI, 11);
3684    tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_DMA));
3685    tu_cs_emit(cs, A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_INDIRECT_COUNT_INDEXED) |
3686                   A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(vs_params_offset(cmd)));
3687    tu_cs_emit(cs, drawCount);
3688    tu_cs_emit_qw(cs, cmd->state.index_va);
3689    tu_cs_emit(cs, cmd->state.max_index_count);
3690    tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + offset);
3691    tu_cs_emit_qw(cs, count_buf->bo->iova + count_buf->bo_offset + countBufferOffset);
3692    tu_cs_emit(cs, stride);
3693 }
3694 
3695 void tu_CmdDrawIndirectByteCountEXT(VkCommandBuffer commandBuffer,
3696                                     uint32_t instanceCount,
3697                                     uint32_t firstInstance,
3698                                     VkBuffer _counterBuffer,
3699                                     VkDeviceSize counterBufferOffset,
3700                                     uint32_t counterOffset,
3701                                     uint32_t vertexStride)
3702 {
3703    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
3704    TU_FROM_HANDLE(tu_buffer, buf, _counterBuffer);
3705    struct tu_cs *cs = &cmd->draw_cs;
3706 
3707    /* All known firmware versions do not wait for WFI's with CP_DRAW_AUTO.
3708     * Plus, for the common case where the counter buffer is written by
3709     * vkCmdEndTransformFeedback, we need to wait for the CP_WAIT_MEM_WRITES to
3710     * complete which means we need a WAIT_FOR_ME anyway.
3711     */
3712    draw_wfm(cmd);
3713 
3714    cmd->state.vs_params = tu6_emit_vs_params(cmd, 0, firstInstance);
3715 
3716    tu6_draw_common(cmd, cs, false, 0);
3717 
3718    tu_cs_emit_pkt7(cs, CP_DRAW_AUTO, 6);
3719    tu_cs_emit(cs, tu_draw_initiator(cmd, DI_SRC_SEL_AUTO_XFB));
3720    tu_cs_emit(cs, instanceCount);
3721    tu_cs_emit_qw(cs, buf->bo->iova + buf->bo_offset + counterBufferOffset);
3722    tu_cs_emit(cs, counterOffset);
3723    tu_cs_emit(cs, vertexStride);
3724 }
3725 
3726 struct tu_dispatch_info
3727 {
3728    /**
3729     * Determine the layout of the grid (in block units) to be used.
3730     */
3731    uint32_t blocks[3];
3732 
3733    /**
3734     * A starting offset for the grid. If unaligned is set, the offset
3735     * must still be aligned.
3736     */
3737    uint32_t offsets[3];
3738    /**
3739     * Whether it's an unaligned compute dispatch.
3740     */
3741    bool unaligned;
3742 
3743    /**
3744     * Indirect compute parameters resource.
3745     */
3746    struct tu_buffer *indirect;
3747    uint64_t indirect_offset;
3748 };
3749 
3750 static void
3751 tu_emit_compute_driver_params(struct tu_cs *cs, struct tu_pipeline *pipeline,
3752                               const struct tu_dispatch_info *info)
3753 {
3754    gl_shader_stage type = MESA_SHADER_COMPUTE;
3755    const struct tu_program_descriptor_linkage *link =
3756       &pipeline->program.link[type];
3757    const struct ir3_const_state *const_state = &link->const_state;
3758    uint32_t offset = const_state->offsets.driver_param;
3759 
3760    if (link->constlen <= offset)
3761       return;
3762 
3763    if (!info->indirect) {
3764       uint32_t driver_params[IR3_DP_CS_COUNT] = {
3765          [IR3_DP_NUM_WORK_GROUPS_X] = info->blocks[0],
3766          [IR3_DP_NUM_WORK_GROUPS_Y] = info->blocks[1],
3767          [IR3_DP_NUM_WORK_GROUPS_Z] = info->blocks[2],
3768          [IR3_DP_LOCAL_GROUP_SIZE_X] = pipeline->compute.local_size[0],
3769          [IR3_DP_LOCAL_GROUP_SIZE_Y] = pipeline->compute.local_size[1],
3770          [IR3_DP_LOCAL_GROUP_SIZE_Z] = pipeline->compute.local_size[2],
3771       };
3772 
3773       uint32_t num_consts = MIN2(const_state->num_driver_params,
3774                                  (link->constlen - offset) * 4);
3775       /* push constants */
3776       tu_cs_emit_pkt7(cs, tu6_stage2opcode(type), 3 + num_consts);
3777       tu_cs_emit(cs, CP_LOAD_STATE6_0_DST_OFF(offset) |
3778                  CP_LOAD_STATE6_0_STATE_TYPE(ST6_CONSTANTS) |
3779                  CP_LOAD_STATE6_0_STATE_SRC(SS6_DIRECT) |
3780                  CP_LOAD_STATE6_0_STATE_BLOCK(tu6_stage2shadersb(type)) |
3781                  CP_LOAD_STATE6_0_NUM_UNIT(num_consts / 4));
3782       tu_cs_emit(cs, 0);
3783       tu_cs_emit(cs, 0);
3784       uint32_t i;
3785       for (i = 0; i < num_consts; i++)
3786          tu_cs_emit(cs, driver_params[i]);
3787    } else {
3788       tu_finishme("Indirect driver params");
3789    }
3790 }
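/* Worked example (illustrative): vkCmdDispatch(cb, 8, 4, 1) on a pipeline
 * with a 64x1x1 local size makes the first six driver params
 * { 8, 4, 1, 64, 1, 1 } (workgroup counts, then local group size); any
 * remaining entries of driver_params[] stay zero-initialized, and the amount
 * actually emitted is clamped to what fits in link->constlen.
 */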
3791 
3792 static void
3793 tu_dispatch(struct tu_cmd_buffer *cmd,
3794             const struct tu_dispatch_info *info)
3795 {
3796    struct tu_cs *cs = &cmd->cs;
3797    struct tu_pipeline *pipeline = cmd->state.compute_pipeline;
3798    struct tu_descriptor_state *descriptors_state =
3799       &cmd->descriptors[VK_PIPELINE_BIND_POINT_COMPUTE];
3800 
3801    /* TODO: We could probably flush less if we add a compute_flush_bits
3802     * bitfield.
3803     */
3804    tu_emit_cache_flush(cmd, cs);
3805 
3806    /* note: no reason to have this in a separate IB */
3807    tu_cs_emit_state_ib(cs,
3808          tu6_emit_consts(cmd, pipeline, descriptors_state, MESA_SHADER_COMPUTE));
3809 
3810    tu_emit_compute_driver_params(cs, pipeline, info);
3811 
3812    if (cmd->state.dirty & TU_CMD_DIRTY_COMPUTE_DESC_SETS_LOAD)
3813       tu_cs_emit_state_ib(cs, pipeline->load_state);
3814 
3815    cmd->state.dirty &= ~TU_CMD_DIRTY_COMPUTE_DESC_SETS_LOAD;
3816 
3817    tu_cs_emit_pkt7(cs, CP_SET_MARKER, 1);
3818    tu_cs_emit(cs, A6XX_CP_SET_MARKER_0_MODE(RM6_COMPUTE));
3819 
3820    const uint32_t *local_size = pipeline->compute.local_size;
3821    const uint32_t *num_groups = info->blocks;
3822    tu_cs_emit_regs(cs,
3823                    A6XX_HLSQ_CS_NDRANGE_0(.kerneldim = 3,
3824                                           .localsizex = local_size[0] - 1,
3825                                           .localsizey = local_size[1] - 1,
3826                                           .localsizez = local_size[2] - 1),
3827                    A6XX_HLSQ_CS_NDRANGE_1(.globalsize_x = local_size[0] * num_groups[0]),
3828                    A6XX_HLSQ_CS_NDRANGE_2(.globaloff_x = 0),
3829                    A6XX_HLSQ_CS_NDRANGE_3(.globalsize_y = local_size[1] * num_groups[1]),
3830                    A6XX_HLSQ_CS_NDRANGE_4(.globaloff_y = 0),
3831                    A6XX_HLSQ_CS_NDRANGE_5(.globalsize_z = local_size[2] * num_groups[2]),
3832                    A6XX_HLSQ_CS_NDRANGE_6(.globaloff_z = 0));
3833 
3834    tu_cs_emit_regs(cs,
3835                    A6XX_HLSQ_CS_KERNEL_GROUP_X(1),
3836                    A6XX_HLSQ_CS_KERNEL_GROUP_Y(1),
3837                    A6XX_HLSQ_CS_KERNEL_GROUP_Z(1));
3838 
3839    if (info->indirect) {
3840       uint64_t iova = tu_buffer_iova(info->indirect) + info->indirect_offset;
3841 
3842       tu_cs_emit_pkt7(cs, CP_EXEC_CS_INDIRECT, 4);
3843       tu_cs_emit(cs, 0x00000000);
3844       tu_cs_emit_qw(cs, iova);
3845       tu_cs_emit(cs,
3846                  A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(local_size[0] - 1) |
3847                  A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(local_size[1] - 1) |
3848                  A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(local_size[2] - 1));
3849    } else {
3850       tu_cs_emit_pkt7(cs, CP_EXEC_CS, 4);
3851       tu_cs_emit(cs, 0x00000000);
3852       tu_cs_emit(cs, CP_EXEC_CS_1_NGROUPS_X(info->blocks[0]));
3853       tu_cs_emit(cs, CP_EXEC_CS_2_NGROUPS_Y(info->blocks[1]));
3854       tu_cs_emit(cs, CP_EXEC_CS_3_NGROUPS_Z(info->blocks[2]));
3855    }
3856 
3857    tu_cs_emit_wfi(cs);
3858 }
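/* Worked example (illustrative) for the HLSQ_CS_NDRANGE registers above: with
 * a 64x1x1 local size and an 8x4x1 dispatch, localsizex/y/z are programmed as
 * 63/0/0 (size minus one) and globalsize_x/y/z as 64*8 = 512, 1*4 = 4 and
 * 1*1 = 1.
 */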
3859 
3860 void
3861 tu_CmdDispatchBase(VkCommandBuffer commandBuffer,
3862                    uint32_t base_x,
3863                    uint32_t base_y,
3864                    uint32_t base_z,
3865                    uint32_t x,
3866                    uint32_t y,
3867                    uint32_t z)
3868 {
3869    TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
3870    struct tu_dispatch_info info = {};
3871 
3872    info.blocks[0] = x;
3873    info.blocks[1] = y;
3874    info.blocks[2] = z;
3875 
3876    info.offsets[0] = base_x;
3877    info.offsets[1] = base_y;
3878    info.offsets[2] = base_z;
3879    tu_dispatch(cmd_buffer, &info);
3880 }
3881 
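/* vkCmdDispatch() is simply vkCmdDispatchBase() with a zero base offset.
 * Illustrative application-side equivalence (handle name is arbitrary):
 *
 *    vkCmdDispatch(cmdbuf, 64, 64, 1);
 *    // behaves the same as:
 *    vkCmdDispatchBase(cmdbuf, 0, 0, 0, 64, 64, 1);
 */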
3882 void
3883 tu_CmdDispatch(VkCommandBuffer commandBuffer,
3884                uint32_t x,
3885                uint32_t y,
3886                uint32_t z)
3887 {
3888    tu_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
3889 }
3890 
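/* vkCmdDispatchIndirect(): the workgroup counts are a VkDispatchIndirectCommand
 * (three consecutive uint32_t values) read by the CP from `buffer` at `offset`
 * at execution time.
 */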
3891 void
3892 tu_CmdDispatchIndirect(VkCommandBuffer commandBuffer,
3893                        VkBuffer _buffer,
3894                        VkDeviceSize offset)
3895 {
3896    TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
3897    TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
3898    struct tu_dispatch_info info = {};
3899 
3900    info.indirect = buffer;
3901    info.indirect_offset = offset;
3902 
3903    tu_dispatch(cmd_buffer, &info);
3904 }
3905 
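/* Ending the render pass replays the recorded draw_cs through either the
 * direct sysmem path or the tiled (GMEM) path, then resets per-pass state.
 */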
3906 void
3907 tu_CmdEndRenderPass2(VkCommandBuffer commandBuffer,
3908                      const VkSubpassEndInfoKHR *pSubpassEndInfo)
3909 {
3910    TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
3911 
3912    tu_cs_end(&cmd_buffer->draw_cs);
3913    tu_cs_end(&cmd_buffer->draw_epilogue_cs);
3914 
3915    if (use_sysmem_rendering(cmd_buffer))
3916       tu_cmd_render_sysmem(cmd_buffer);
3917    else
3918       tu_cmd_render_tiles(cmd_buffer);
3919 
3920    /* discard draw_cs and draw_epilogue_cs entries now that the tiles are
3921       rendered */
3922    tu_cs_discard_entries(&cmd_buffer->draw_cs);
3923    tu_cs_begin(&cmd_buffer->draw_cs);
3924    tu_cs_discard_entries(&cmd_buffer->draw_epilogue_cs);
3925    tu_cs_begin(&cmd_buffer->draw_epilogue_cs);
3926 
3927    cmd_buffer->state.cache.pending_flush_bits |=
3928       cmd_buffer->state.renderpass_cache.pending_flush_bits;
3929    tu_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier, true);
3930 
3931    cmd_buffer->state.pass = NULL;
3932    cmd_buffer->state.subpass = NULL;
3933    cmd_buffer->state.framebuffer = NULL;
3934    cmd_buffer->state.has_tess = false;
3935    cmd_buffer->state.has_subpass_predication = false;
3936 
3937    /* LRZ is not valid next time we use it */
3938    cmd_buffer->state.lrz.valid = false;
3939    cmd_buffer->state.dirty |= TU_CMD_DIRTY_LRZ;
3940 }
3941 
3942 struct tu_barrier_info
3943 {
3944    uint32_t eventCount;
3945    const VkEvent *pEvents;
3946    VkPipelineStageFlags srcStageMask;
3947 };
3948 
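/* Common implementation for vkCmdPipelineBarrier() and vkCmdWaitEvents():
 * collapse all source/destination access masks into cache flush/invalidate
 * bits, and for WaitEvents additionally wait on each event object.
 */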
3949 static void
3950 tu_barrier(struct tu_cmd_buffer *cmd,
3951            uint32_t memoryBarrierCount,
3952            const VkMemoryBarrier *pMemoryBarriers,
3953            uint32_t bufferMemoryBarrierCount,
3954            const VkBufferMemoryBarrier *pBufferMemoryBarriers,
3955            uint32_t imageMemoryBarrierCount,
3956            const VkImageMemoryBarrier *pImageMemoryBarriers,
3957            const struct tu_barrier_info *info)
3958 {
3959    struct tu_cs *cs = cmd->state.pass ? &cmd->draw_cs : &cmd->cs;
3960    VkAccessFlags srcAccessMask = 0;
3961    VkAccessFlags dstAccessMask = 0;
3962 
3963    for (uint32_t i = 0; i < memoryBarrierCount; i++) {
3964       srcAccessMask |= pMemoryBarriers[i].srcAccessMask;
3965       dstAccessMask |= pMemoryBarriers[i].dstAccessMask;
3966    }
3967 
3968    for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
3969       srcAccessMask |= pBufferMemoryBarriers[i].srcAccessMask;
3970       dstAccessMask |= pBufferMemoryBarriers[i].dstAccessMask;
3971    }
3972 
3973    enum tu_cmd_access_mask src_flags = 0;
3974    enum tu_cmd_access_mask dst_flags = 0;
3975 
3976    for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
3977       VkImageLayout old_layout = pImageMemoryBarriers[i].oldLayout;
3978       if (old_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
3979          /* The underlying memory for this image may have been used earlier
3980           * within the same queue submission for a different image, which
3981           * means that there may be old, stale cache entries in the "wrong"
3982           * location that could cause problems after we write to the image.
3983           * We don't want these stale entries to be flushed out later and
3984           * overwrite the actual image data, so we need to flush the CCU.
3985           */
3986          src_flags |= TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE;
3987       }
3988       srcAccessMask |= pImageMemoryBarriers[i].srcAccessMask;
3989       dstAccessMask |= pImageMemoryBarriers[i].dstAccessMask;
3990    }
3991 
3992    /* Inside a renderpass, we don't yet know whether we'll be using sysmem,
3993     * so we have to use the sysmem flushes.
3994     */
3995    bool gmem = cmd->state.ccu_state == TU_CMD_CCU_GMEM &&
3996       !cmd->state.pass;
3997    src_flags |= vk2tu_access(srcAccessMask, gmem);
3998    dst_flags |= vk2tu_access(dstAccessMask, gmem);
3999 
4000    struct tu_cache_state *cache =
4001       cmd->state.pass ? &cmd->state.renderpass_cache : &cmd->state.cache;
4002    tu_flush_for_access(cache, src_flags, dst_flags);
4003 
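   /* For vkCmdWaitEvents(), stall until each event BO reads back 1, which is
    * the value written by write_event() on vkCmdSetEvent().
    */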
4004    for (uint32_t i = 0; i < info->eventCount; i++) {
4005       TU_FROM_HANDLE(tu_event, event, info->pEvents[i]);
4006 
4007       tu_cs_emit_pkt7(cs, CP_WAIT_REG_MEM, 6);
4008       tu_cs_emit(cs, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ) |
4009                      CP_WAIT_REG_MEM_0_POLL_MEMORY);
4010       tu_cs_emit_qw(cs, event->bo.iova); /* POLL_ADDR_LO/HI */
4011       tu_cs_emit(cs, CP_WAIT_REG_MEM_3_REF(1));
4012       tu_cs_emit(cs, CP_WAIT_REG_MEM_4_MASK(~0u));
4013       tu_cs_emit(cs, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(20));
4014    }
4015 }
4016 
4017 void
4018 tu_CmdPipelineBarrier(VkCommandBuffer commandBuffer,
4019                       VkPipelineStageFlags srcStageMask,
4020                       VkPipelineStageFlags dstStageMask,
4021                       VkDependencyFlags dependencyFlags,
4022                       uint32_t memoryBarrierCount,
4023                       const VkMemoryBarrier *pMemoryBarriers,
4024                       uint32_t bufferMemoryBarrierCount,
4025                       const VkBufferMemoryBarrier *pBufferMemoryBarriers,
4026                       uint32_t imageMemoryBarrierCount,
4027                       const VkImageMemoryBarrier *pImageMemoryBarriers)
4028 {
4029    TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
4030    struct tu_barrier_info info;
4031 
4032    info.eventCount = 0;
4033    info.pEvents = NULL;
4034    info.srcStageMask = srcStageMask;
4035 
4036    tu_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
4037               bufferMemoryBarrierCount, pBufferMemoryBarriers,
4038               imageMemoryBarrierCount, pImageMemoryBarriers, &info);
4039 }
4040 
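/* Write `value` to the event BO: directly from the CP if only top-of-pipe
 * stages need to complete first, otherwise via an RB_DONE_TS timestamp event
 * once all preceding work has finished.
 */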
4041 static void
4042 write_event(struct tu_cmd_buffer *cmd, struct tu_event *event,
4043             VkPipelineStageFlags stageMask, unsigned value)
4044 {
4045    struct tu_cs *cs = &cmd->cs;
4046 
4047    /* vkCmdSetEvent/vkCmdResetEvent cannot be called inside a render pass */
4048    assert(!cmd->state.pass);
4049 
4050    tu_emit_cache_flush(cmd, cs);
4051 
4052    /* Flags that only require a top-of-pipe event. DrawIndirect parameters are
4053     * read by the CP, so the draw indirect stage counts as top-of-pipe too.
4054     */
4055    VkPipelineStageFlags top_of_pipe_flags =
4056       VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT |
4057       VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
4058 
4059    if (!(stageMask & ~top_of_pipe_flags)) {
4060       tu_cs_emit_pkt7(cs, CP_MEM_WRITE, 3);
4061       tu_cs_emit_qw(cs, event->bo.iova); /* ADDR_LO/HI */
4062       tu_cs_emit(cs, value);
4063    } else {
4064       /* Use a RB_DONE_TS event to wait for everything to complete. */
4065       tu_cs_emit_pkt7(cs, CP_EVENT_WRITE, 4);
4066       tu_cs_emit(cs, CP_EVENT_WRITE_0_EVENT(RB_DONE_TS));
4067       tu_cs_emit_qw(cs, event->bo.iova);
4068       tu_cs_emit(cs, value);
4069    }
4070 }
4071 
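/* vkCmdSetEvent() and vkCmdResetEvent() both go through write_event(),
 * writing 1 and 0 respectively.
 */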
4072 void
4073 tu_CmdSetEvent(VkCommandBuffer commandBuffer,
4074                VkEvent _event,
4075                VkPipelineStageFlags stageMask)
4076 {
4077    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
4078    TU_FROM_HANDLE(tu_event, event, _event);
4079 
4080    write_event(cmd, event, stageMask, 1);
4081 }
4082 
4083 void
4084 tu_CmdResetEvent(VkCommandBuffer commandBuffer,
4085                  VkEvent _event,
4086                  VkPipelineStageFlags stageMask)
4087 {
4088    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
4089    TU_FROM_HANDLE(tu_event, event, _event);
4090 
4091    write_event(cmd, event, stageMask, 0);
4092 }
4093 
4094 void
4095 tu_CmdWaitEvents(VkCommandBuffer commandBuffer,
4096                  uint32_t eventCount,
4097                  const VkEvent *pEvents,
4098                  VkPipelineStageFlags srcStageMask,
4099                  VkPipelineStageFlags dstStageMask,
4100                  uint32_t memoryBarrierCount,
4101                  const VkMemoryBarrier *pMemoryBarriers,
4102                  uint32_t bufferMemoryBarrierCount,
4103                  const VkBufferMemoryBarrier *pBufferMemoryBarriers,
4104                  uint32_t imageMemoryBarrierCount,
4105                  const VkImageMemoryBarrier *pImageMemoryBarriers)
4106 {
4107    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
4108    struct tu_barrier_info info;
4109 
4110    info.eventCount = eventCount;
4111    info.pEvents = pEvents;
4112    info.srcStageMask = 0;
4113 
4114    tu_barrier(cmd, memoryBarrierCount, pMemoryBarriers,
4115               bufferMemoryBarrierCount, pBufferMemoryBarriers,
4116               imageMemoryBarrierCount, pImageMemoryBarriers, &info);
4117 }
4118 
4119 void
4120 tu_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)
4121 {
4122    /* No-op */
4123 }
4124 
4125 
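/* VK_EXT_conditional_rendering: enable the CP's global draw predicate and
 * copy the 32-bit condition from the user buffer into the zeroed `predicate`
 * slot of the global BO so it can be compared as a 64-bit value.
 */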
4126 void
4127 tu_CmdBeginConditionalRenderingEXT(VkCommandBuffer commandBuffer,
4128                                    const VkConditionalRenderingBeginInfoEXT *pConditionalRenderingBegin)
4129 {
4130    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
4131 
4132    cmd->state.predication_active = true;
4133    if (cmd->state.pass)
4134       cmd->state.has_subpass_predication = true;
4135 
4136    struct tu_cs *cs = cmd->state.pass ? &cmd->draw_cs : &cmd->cs;
4137 
4138    tu_cs_emit_pkt7(cs, CP_DRAW_PRED_ENABLE_GLOBAL, 1);
4139    tu_cs_emit(cs, 1);
4140 
4141    /* Wait for any writes to the predicate to land */
4142    if (cmd->state.pass)
4143       tu_emit_cache_flush_renderpass(cmd, cs);
4144    else
4145       tu_emit_cache_flush(cmd, cs);
4146 
4147    TU_FROM_HANDLE(tu_buffer, buf, pConditionalRenderingBegin->buffer);
4148    uint64_t iova = tu_buffer_iova(buf) + pConditionalRenderingBegin->offset;
4149 
4150    /* qcom doesn't support 32-bit reference values, only 64-bit, but Vulkan
4151     * mandates 32-bit comparisons. Our workaround is to copy the reference
4152     * value to the low 32-bits of a location where the high 32 bits are known
4153     * to be 0 and then compare that.
4154     */
4155    tu_cs_emit_pkt7(cs, CP_MEM_TO_MEM, 5);
4156    tu_cs_emit(cs, 0);
4157    tu_cs_emit_qw(cs, global_iova(cmd, predicate));
4158    tu_cs_emit_qw(cs, iova);
4159 
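   /* Make sure the CP_MEM_TO_MEM copy above has landed before
    * CP_DRAW_PRED_SET reads the predicate back from memory.
    */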
4160    tu_cs_emit_pkt7(cs, CP_WAIT_MEM_WRITES, 0);
4161    tu_cs_emit_pkt7(cs, CP_WAIT_FOR_ME, 0);
4162 
4163    bool inv = pConditionalRenderingBegin->flags & VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT;
4164    tu_cs_emit_pkt7(cs, CP_DRAW_PRED_SET, 3);
4165    tu_cs_emit(cs, CP_DRAW_PRED_SET_0_SRC(PRED_SRC_MEM) |
4166                   CP_DRAW_PRED_SET_0_TEST(inv ? EQ_0_PASS : NE_0_PASS));
4167    tu_cs_emit_qw(cs, global_iova(cmd, predicate));
4168 }
4169 
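/* Ending conditional rendering just disables the global draw predicate again. */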
4170 void
4171 tu_CmdEndConditionalRenderingEXT(VkCommandBuffer commandBuffer)
4172 {
4173    TU_FROM_HANDLE(tu_cmd_buffer, cmd, commandBuffer);
4174 
4175    cmd->state.predication_active = false;
4176 
4177    struct tu_cs *cs = cmd->state.pass ? &cmd->draw_cs : &cmd->cs;
4178 
4179    tu_cs_emit_pkt7(cs, CP_DRAW_PRED_ENABLE_GLOBAL, 1);
4180    tu_cs_emit(cs, 0);
4181 }
4182 
4183