/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 * Copyright © 2018 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#define FD_BO_NO_HARDPIN 1

#include <stdio.h>

#include "pipe/p_state.h"
#include "util/format/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_string.h"

#include "freedreno_draw.h"
#include "freedreno_resource.h"
#include "freedreno_state.h"
#include "freedreno_tracepoints.h"

#include "fd6_barrier.h"
#include "fd6_blitter.h"
#include "fd6_context.h"
#include "fd6_draw.h"
#include "fd6_emit.h"
#include "fd6_gmem.h"
#include "fd6_pack.h"
#include "fd6_program.h"
#include "fd6_resource.h"
#include "fd6_zsa.h"
/**
 * Emits the flags registers, suitable for RB_MRT_FLAG_BUFFER,
 * RB_DEPTH_FLAG_BUFFER, SP_PS_2D_SRC_FLAGS, and RB_BLIT_FLAG_DST.
 */
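/* Note: callers are expected to have already emitted the packet header
 * for the three dwords written here (flag buffer ADDR_LO/ADDR_HI and
 * PITCH), e.g. OUT_PKT4(ring, REG_A6XX_RB_MRT_FLAG_BUFFER(i), 3) in
 * emit_mrt() below.
 */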
void
fd6_emit_flag_reference(struct fd_ringbuffer *ring, struct fd_resource *rsc,
                        int level, int layer)
{
   if (fd_resource_ubwc_enabled(rsc, level)) {
      OUT_RELOC(ring, rsc->bo, fd_resource_ubwc_offset(rsc, level, layer), 0,
                0);
      OUT_RING(ring, A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH(
                        fdl_ubwc_pitch(&rsc->layout, level)) |
                        A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH(
                           rsc->layout.ubwc_layer_size >> 2));
   } else {
      OUT_RING(ring, 0x00000000); /* RB_MRT_FLAG_BUFFER[i].ADDR_LO */
      OUT_RING(ring, 0x00000000); /* RB_MRT_FLAG_BUFFER[i].ADDR_HI */
      OUT_RING(ring, 0x00000000); /* RB_MRT_FLAG_BUFFER[i].PITCH */
   }
}

template <chip CHIP>
static void
emit_mrt(struct fd_ringbuffer *ring, struct pipe_framebuffer_state *pfb,
         const struct fd_gmem_stateobj *gmem)
{
   unsigned srgb_cntl = 0;
   unsigned i;

   /* Note, GLES 3.2 says "If the fragment’s layer number is negative, or
    * greater than or equal to the minimum number of layers of any attachment,
    * the effects of the fragment on the framebuffer contents are undefined."
    */
   unsigned max_layer_index = 0;
   enum a6xx_format mrt0_format = (enum a6xx_format)0;

   for (i = 0; i < pfb->nr_cbufs; i++) {
      enum a3xx_color_swap swap = WZYX;
      bool sint = false, uint = false;
      struct fd_resource *rsc = NULL;
      ASSERTED struct fdl_slice *slice = NULL;
      uint32_t stride = 0;
      uint32_t array_stride = 0;
      uint32_t offset;

      if (!pfb->cbufs[i])
         continue;

      struct pipe_surface *psurf = pfb->cbufs[i];
      enum pipe_format pformat = psurf->format;
      rsc = fd_resource(psurf->texture);

      uint32_t base = gmem ? gmem->cbuf_base[i] : 0;
      slice = fd_resource_slice(rsc, psurf->u.tex.level);
      enum a6xx_tile_mode tile_mode = (enum a6xx_tile_mode)
            fd_resource_tile_mode(psurf->texture, psurf->u.tex.level);
      enum a6xx_format format = fd6_color_format(pformat, tile_mode);
      sint = util_format_is_pure_sint(pformat);
      uint = util_format_is_pure_uint(pformat);

      if (util_format_is_srgb(pformat))
         srgb_cntl |= (1 << i);

      offset =
         fd_resource_offset(rsc, psurf->u.tex.level, psurf->u.tex.first_layer);

      stride = fd_resource_pitch(rsc, psurf->u.tex.level);
      array_stride = fd_resource_layer_stride(rsc, psurf->u.tex.level);
      swap = fd6_color_swap(pformat, (enum a6xx_tile_mode)rsc->layout.tile_mode);

      max_layer_index = psurf->u.tex.last_layer - psurf->u.tex.first_layer;

      assert((offset + slice->size0) <= fd_bo_size(rsc->bo));

      /* Batch with no draws? */
      fd_ringbuffer_attach_bo(ring, rsc->bo);

      OUT_REG(
         ring,
         RB_MRT_BUF_INFO(CHIP, i, .color_format = format,
                              .color_tile_mode = tile_mode, .color_swap = swap),
         A6XX_RB_MRT_PITCH(i, stride),
         A6XX_RB_MRT_ARRAY_PITCH(i, array_stride),
         A6XX_RB_MRT_BASE(i, .bo = rsc->bo, .bo_offset = offset),
         A6XX_RB_MRT_BASE_GMEM(i, base));

      OUT_REG(ring, A6XX_SP_FS_MRT_REG(i, .color_format = format,
                                       .color_sint = sint, .color_uint = uint));

      OUT_PKT4(ring, REG_A6XX_RB_MRT_FLAG_BUFFER(i), 3);
      fd6_emit_flag_reference(ring, rsc, psurf->u.tex.level,
                              psurf->u.tex.first_layer);

      if (i == 0)
         mrt0_format = format;
   }
   if (pfb->zsbuf)
      max_layer_index = pfb->zsbuf->u.tex.last_layer - pfb->zsbuf->u.tex.first_layer;

   OUT_REG(ring, A6XX_GRAS_LRZ_MRT_BUF_INFO_0(.color_format = mrt0_format));

   OUT_REG(ring, A6XX_RB_SRGB_CNTL(.dword = srgb_cntl));
   OUT_REG(ring, A6XX_SP_SRGB_CNTL(.dword = srgb_cntl));

   OUT_REG(ring, A6XX_GRAS_MAX_LAYER_INDEX(max_layer_index));
}

template <chip CHIP>
static void
emit_zs(struct fd_ringbuffer *ring, struct pipe_surface *zsbuf,
        const struct fd_gmem_stateobj *gmem)
{
   if (zsbuf) {
      struct fd_resource *rsc = fd_resource(zsbuf->texture);
      enum a6xx_depth_format fmt = fd6_pipe2depth(zsbuf->format);
      uint32_t stride = fd_resource_pitch(rsc, zsbuf->u.tex.level);
      uint32_t array_stride = fd_resource_layer_stride(rsc, zsbuf->u.tex.level);
      uint32_t base = gmem ? gmem->zsbuf_base[0] : 0;
      uint32_t offset =
         fd_resource_offset(rsc, zsbuf->u.tex.level, zsbuf->u.tex.first_layer);

      /* We could have a depth buffer, but no draws with depth write/test
       * enabled, in which case it wouldn't have been part of the batch
       * resource tracking
       */
      fd_ringbuffer_attach_bo(ring, rsc->bo);

      OUT_REG(
         ring, RB_DEPTH_BUFFER_INFO(CHIP, .depth_format = fmt),
         A6XX_RB_DEPTH_BUFFER_PITCH(stride),
         A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(array_stride),
         A6XX_RB_DEPTH_BUFFER_BASE(.bo = rsc->bo, .bo_offset = offset),
         A6XX_RB_DEPTH_BUFFER_BASE_GMEM(base));

      OUT_REG(ring, A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = fmt));

      OUT_PKT4(ring, REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE, 3);
      fd6_emit_flag_reference(ring, rsc, zsbuf->u.tex.level,
                              zsbuf->u.tex.first_layer);

      /* NOTE: the blob emits GRAS_LRZ_CNTL plus GRAS_LRZ_BUFFER_BASE
       * plus this CP_EVENT_WRITE at the end in its own IB..
       */
      OUT_PKT7(ring, CP_EVENT_WRITE, 1);
      OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(LRZ_CLEAR));

      if (rsc->stencil) {
         stride = fd_resource_pitch(rsc->stencil, zsbuf->u.tex.level);
         array_stride = fd_resource_layer_stride(rsc->stencil, zsbuf->u.tex.level);
         uint32_t base = gmem ? gmem->zsbuf_base[1] : 0;
         uint32_t offset =
            fd_resource_offset(rsc->stencil, zsbuf->u.tex.level, zsbuf->u.tex.first_layer);

         fd_ringbuffer_attach_bo(ring, rsc->stencil->bo);

         OUT_REG(ring, RB_STENCIL_INFO(CHIP, .separate_stencil = true),
                 A6XX_RB_STENCIL_BUFFER_PITCH(stride),
                 A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH(array_stride),
                 A6XX_RB_STENCIL_BUFFER_BASE(.bo = rsc->stencil->bo, .bo_offset = offset),
                 A6XX_RB_STENCIL_BUFFER_BASE_GMEM(base));
      } else {
         OUT_REG(ring, RB_STENCIL_INFO(CHIP, 0));
      }
   } else {
      OUT_REG(ring,
              RB_DEPTH_BUFFER_INFO(
                    CHIP,
                    .depth_format = DEPTH6_NONE,
              ),
              A6XX_RB_DEPTH_BUFFER_PITCH(),
              A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(),
              A6XX_RB_DEPTH_BUFFER_BASE(),
              A6XX_RB_DEPTH_BUFFER_BASE_GMEM(),
      );

      OUT_REG(ring,
              A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE));

      OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE, 5);
      OUT_RING(ring, 0x00000000); /* GRAS_LRZ_BUFFER_BASE_LO */
      OUT_RING(ring, 0x00000000); /* GRAS_LRZ_BUFFER_BASE_HI */
      OUT_RING(ring, 0x00000000); /* GRAS_LRZ_BUFFER_PITCH */
      OUT_RING(ring, 0x00000000); /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO */
      OUT_RING(ring, 0x00000000); /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI */

      OUT_REG(ring, RB_STENCIL_INFO(CHIP, 0));
   }
}

static void
emit_lrz(struct fd_batch *batch, struct fd_batch_subpass *subpass)
{
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   struct fd_ringbuffer *ring = batch->gmem;

   if (!subpass->lrz) {
      OUT_REG(ring, A6XX_GRAS_LRZ_BUFFER_BASE(),
              A6XX_GRAS_LRZ_BUFFER_PITCH(),
              A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE());
      return;
   }

   /* When swapping LRZ buffers we need to flush the LRZ cache..
    * we possibly don't need this during the binning pass; it
    * appears that the corruption happens on the read-side, ie.
    * we change the LRZ buffer after a sub-pass, but get a
    * cache-hit on stale data from the previous LRZ buffer.
    */
   fd6_emit_lrz_flush(ring);

   struct fd_resource *zsbuf = fd_resource(pfb->zsbuf->texture);
   OUT_REG(ring, A6XX_GRAS_LRZ_BUFFER_BASE(.bo = subpass->lrz),
           A6XX_GRAS_LRZ_BUFFER_PITCH(.pitch = zsbuf->lrz_pitch),
           A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE());
   fd_ringbuffer_attach_bo(ring, subpass->lrz);
}

/* Emit any needed lrz clears to the prologue cmds
 */
template <chip CHIP>
static void
emit_lrz_clears(struct fd_batch *batch)
{
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   struct fd_context *ctx = batch->ctx;
   unsigned count = 0;

   if (!pfb->zsbuf)
      return;

   struct fd_resource *zsbuf = fd_resource(pfb->zsbuf->texture);

   foreach_subpass (subpass, batch) {
      /* The lrz buffer isn't explicitly tracked by the batch resource
       * tracking (tracking the zsbuf is sufficient), but it still needs
       * to be attached to the ring
       */
      if (subpass->lrz)
         fd_ringbuffer_attach_bo(batch->gmem, subpass->lrz);

      if (!(subpass->fast_cleared & FD_BUFFER_LRZ))
         continue;

      subpass->fast_cleared &= ~FD_BUFFER_LRZ;

      /* prep before first clear: */
      if (count == 0) {
         struct fd_ringbuffer *ring = fd_batch_get_prologue(batch);

         fd6_emit_ccu_cntl(ring, ctx->screen, false);

         OUT_PKT7(ring, CP_SET_MARKER, 1);
         OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BLIT2DSCALE));

         fd6_emit_flushes(ctx, ring, FD6_FLUSH_CACHE);

         if (ctx->screen->info->a6xx.magic.RB_DBG_ECO_CNTL_blit !=
             ctx->screen->info->a6xx.magic.RB_DBG_ECO_CNTL) {
            /* This is a non-context register, so we have to WFI before changing. */
            OUT_WFI5(ring);
            OUT_PKT4(ring, REG_A6XX_RB_DBG_ECO_CNTL, 1);
            OUT_RING(ring, ctx->screen->info->a6xx.magic.RB_DBG_ECO_CNTL_blit);
         }
      }

      fd6_clear_lrz<CHIP>(batch, zsbuf, subpass->lrz, subpass->clear_depth);

      count++;
   }

   /* cleanup after last clear: */
   if (count > 0) {
      struct fd_ringbuffer *ring = fd_batch_get_prologue(batch);

      if (ctx->screen->info->a6xx.magic.RB_DBG_ECO_CNTL_blit !=
          ctx->screen->info->a6xx.magic.RB_DBG_ECO_CNTL) {
         OUT_WFI5(ring);
         OUT_PKT4(ring, REG_A6XX_RB_DBG_ECO_CNTL, 1);
         OUT_RING(ring, ctx->screen->info->a6xx.magic.RB_DBG_ECO_CNTL);
      }

      /* Clearing writes via CCU color in the PS stage, and LRZ is read via
       * UCHE in the earlier GRAS stage.
       *
       * Note tu also asks for WFI but maybe that is only needed if
       * has_ccu_flush_bug (and it is added by fd6_emit_flushes() already
       * in that case)
       */
      fd6_emit_flushes(batch->ctx, ring,
                       FD6_FLUSH_CCU_COLOR |
                       FD6_INVALIDATE_CACHE);
   }
}

static bool
use_hw_binning(struct fd_batch *batch)
{
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;

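   /* A pipe's visibility result is consumed through a single 32-bit
    * VSC_STATE register, one bit per bin (see the CP_REG_TEST with
    * A6XX_CP_REG_TEST_0_BIT(tile->n) in emit_conditional_ib()), so
    * presumably more than 32 bins per pipe cannot be represented:
    */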
   if ((gmem->maxpw * gmem->maxph) > 32)
      return false;

   return fd_binning_enabled && ((gmem->nbins_x * gmem->nbins_y) >= 2) &&
          (batch->num_draws > 0);
}

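/* fb_read (framebuffer-fetch) texture descriptors are recorded with
 * placeholder contents at draw time and patched here, since the GMEM
 * layout (bin size, per-buffer gmem offsets) that the descriptor depends
 * on is only decided at flush time.
 */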
static void
patch_fb_read_gmem(struct fd_batch *batch)
{
   struct fd_screen *screen = batch->ctx->screen;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;

   unsigned num_patches = fd_patch_num_elements(&batch->fb_read_patches);
   if (!num_patches)
      return;

   for (unsigned i = 0; i < num_patches; i++) {
      struct fd_cs_patch *patch =
         fd_patch_element(&batch->fb_read_patches, i);
      int buf = patch->val;
      struct pipe_surface *psurf = pfb->cbufs[buf];
      struct pipe_resource *prsc = psurf->texture;
      struct fd_resource *rsc = fd_resource(prsc);
      enum pipe_format format = psurf->format;

      uint8_t swiz[4];
      fdl6_format_swiz(psurf->format, false, swiz);

      uint64_t base = screen->gmem_base + gmem->cbuf_base[buf];
      /* always TILE6_2 mode in GMEM, which also means no swap: */
      uint32_t descriptor[FDL6_TEX_CONST_DWORDS] = {
            A6XX_TEX_CONST_0_FMT(fd6_texture_format(
                  format, (enum a6xx_tile_mode)rsc->layout.tile_mode)) |
            A6XX_TEX_CONST_0_SAMPLES(fd_msaa_samples(prsc->nr_samples)) |
            A6XX_TEX_CONST_0_SWAP(WZYX) |
            A6XX_TEX_CONST_0_TILE_MODE(TILE6_2) |
            COND(util_format_is_srgb(format), A6XX_TEX_CONST_0_SRGB) |
            A6XX_TEX_CONST_0_SWIZ_X(fdl6_swiz(swiz[0])) |
            A6XX_TEX_CONST_0_SWIZ_Y(fdl6_swiz(swiz[1])) |
            A6XX_TEX_CONST_0_SWIZ_Z(fdl6_swiz(swiz[2])) |
            A6XX_TEX_CONST_0_SWIZ_W(fdl6_swiz(swiz[3])),

         A6XX_TEX_CONST_1_WIDTH(pfb->width) |
            A6XX_TEX_CONST_1_HEIGHT(pfb->height),

         A6XX_TEX_CONST_2_PITCH(gmem->bin_w * gmem->cbuf_cpp[buf]) |
            A6XX_TEX_CONST_2_TYPE(A6XX_TEX_2D),

         A6XX_TEX_CONST_3_ARRAY_PITCH(rsc->layout.layer_size),
         A6XX_TEX_CONST_4_BASE_LO(base),

         A6XX_TEX_CONST_5_BASE_HI(base >> 32) |
            A6XX_TEX_CONST_5_DEPTH(prsc->array_size)
      };

      memcpy(patch->cs, descriptor, FDL6_TEX_CONST_DWORDS * 4);
   }

   util_dynarray_clear(&batch->fb_read_patches);
}

static void
patch_fb_read_sysmem(struct fd_batch *batch)
{
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;

   unsigned num_patches =
      fd_patch_num_elements(&batch->fb_read_patches);
   if (!num_patches)
      return;
   for (unsigned i = 0; i < num_patches; i++) {
      struct fd_cs_patch *patch =
         fd_patch_element(&batch->fb_read_patches, i);
      int buf = patch->val;

      struct pipe_surface *psurf = pfb->cbufs[buf];
      if (!psurf)
         return;

      struct pipe_resource *prsc = psurf->texture;
      struct fd_resource *rsc = fd_resource(prsc);

      uint32_t block_width, block_height;
      fdl6_get_ubwc_blockwidth(&rsc->layout, &block_width, &block_height);

      struct fdl_view_args args = {
         .chip = A6XX,

         .iova = fd_bo_get_iova(rsc->bo),

         .base_miplevel = psurf->u.tex.level,
         .level_count = 1,

         .base_array_layer = psurf->u.tex.first_layer,
         .layer_count = psurf->u.tex.last_layer - psurf->u.tex.first_layer + 1,

         .swiz = {PIPE_SWIZZLE_X, PIPE_SWIZZLE_Y, PIPE_SWIZZLE_Z,
                  PIPE_SWIZZLE_W},
         .format = psurf->format,

         .type = FDL_VIEW_TYPE_2D,
         .chroma_offsets = {FDL_CHROMA_LOCATION_COSITED_EVEN,
                            FDL_CHROMA_LOCATION_COSITED_EVEN},
      };
      const struct fdl_layout *layouts[3] = {&rsc->layout, NULL, NULL};
      struct fdl6_view view;
      fdl6_view_init(&view, layouts, &args,
                     batch->ctx->screen->info->a6xx.has_z24uint_s8uint);
      memcpy(patch->cs, view.descriptor, FDL6_TEX_CONST_DWORDS * 4);
   }

   util_dynarray_clear(&batch->fb_read_patches);
}

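/* On parts with has_cp_reg_write, RB_RENDER_CNTL is written indirectly
 * via CP_REG_WRITE tagged TRACK_RENDER_CNTL, presumably so the CP
 * firmware can track (and patch) this state between the binning and
 * rendering passes; otherwise it is emitted as a plain register write.
 */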
template <chip CHIP>
static void
update_render_cntl(struct fd_batch *batch, struct pipe_framebuffer_state *pfb,
                   bool binning)
{
   struct fd_ringbuffer *ring = batch->gmem;
   struct fd_screen *screen = batch->ctx->screen;
   bool depth_ubwc_enable = false;
   uint32_t mrts_ubwc_enable = 0;
   int i;

   if (pfb->zsbuf) {
      struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);
      depth_ubwc_enable =
         fd_resource_ubwc_enabled(rsc, pfb->zsbuf->u.tex.level);
   }

   for (i = 0; i < pfb->nr_cbufs; i++) {
      if (!pfb->cbufs[i])
         continue;

      struct pipe_surface *psurf = pfb->cbufs[i];
      struct fd_resource *rsc = fd_resource(psurf->texture);

      if (fd_resource_ubwc_enabled(rsc, psurf->u.tex.level))
         mrts_ubwc_enable |= 1 << i;
   }

   struct fd_reg_pair rb_render_cntl = RB_RENDER_CNTL(
         CHIP,
         .ccusinglecachelinesize = 2,
         .binning = binning,
         .flag_depth = depth_ubwc_enable,
         .flag_mrts = mrts_ubwc_enable,
   );

   if (screen->info->a6xx.has_cp_reg_write) {
      OUT_PKT(ring, CP_REG_WRITE,
              CP_REG_WRITE_0(TRACK_RENDER_CNTL),
              CP_REG_WRITE_1(rb_render_cntl.reg),
              CP_REG_WRITE_2(rb_render_cntl.value),
      );
   } else {
      OUT_REG(ring, rb_render_cntl);
   }
}

static void
update_vsc_pipe(struct fd_batch *batch)
{
   struct fd_context *ctx = batch->ctx;
   struct fd6_context *fd6_ctx = fd6_context(ctx);
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct fd_ringbuffer *ring = batch->gmem;
   unsigned max_vsc_pipes = batch->ctx->screen->info->num_vsc_pipes;
   int i;

   if (batch->draw_strm_bits / 8 > fd6_ctx->vsc_draw_strm_pitch) {
      if (fd6_ctx->vsc_draw_strm)
         fd_bo_del(fd6_ctx->vsc_draw_strm);
      fd6_ctx->vsc_draw_strm = NULL;
      /* Note: we probably only need to align to 0x40, but aligning more
       * generously reduces the odds that we will have to realloc again on
       * the next frame:
       */
      fd6_ctx->vsc_draw_strm_pitch = align(batch->draw_strm_bits / 8, 0x4000);
      mesa_logd("pre-resize VSC_DRAW_STRM_PITCH to: 0x%x",
                fd6_ctx->vsc_draw_strm_pitch);
   }

   if (batch->prim_strm_bits / 8 > fd6_ctx->vsc_prim_strm_pitch) {
      if (fd6_ctx->vsc_prim_strm)
         fd_bo_del(fd6_ctx->vsc_prim_strm);
      fd6_ctx->vsc_prim_strm = NULL;
      fd6_ctx->vsc_prim_strm_pitch = align(batch->prim_strm_bits / 8, 0x4000);
      mesa_logd("pre-resize VSC_PRIM_STRM_PITCH to: 0x%x",
                fd6_ctx->vsc_prim_strm_pitch);
   }

   if (!fd6_ctx->vsc_draw_strm) {
      /* We also use four bytes per vsc pipe at the end of the draw
       * stream buffer for VSC_DRAW_STRM_SIZE written back by hw
       * (see VSC_DRAW_STRM_SIZE_ADDRESS)
       */
      unsigned sz = (max_vsc_pipes * fd6_ctx->vsc_draw_strm_pitch) +
                    (max_vsc_pipes * 4);
      fd6_ctx->vsc_draw_strm =
         fd_bo_new(ctx->screen->dev, sz, FD_BO_NOMAP, "vsc_draw_strm");
   }

   if (!fd6_ctx->vsc_prim_strm) {
      unsigned sz = max_vsc_pipes * fd6_ctx->vsc_prim_strm_pitch;
      fd6_ctx->vsc_prim_strm =
         fd_bo_new(ctx->screen->dev, sz, FD_BO_NOMAP, "vsc_prim_strm");
   }

   fd_ringbuffer_attach_bo(ring, fd6_ctx->vsc_draw_strm);
   fd_ringbuffer_attach_bo(ring, fd6_ctx->vsc_prim_strm);

   OUT_REG(ring, A6XX_VSC_BIN_SIZE(.width = gmem->bin_w, .height = gmem->bin_h),
           A6XX_VSC_DRAW_STRM_SIZE_ADDRESS(.bo = fd6_ctx->vsc_draw_strm,
                                           .bo_offset = max_vsc_pipes *
                                              fd6_ctx->vsc_draw_strm_pitch));

   OUT_REG(ring, A6XX_VSC_BIN_COUNT(.nx = gmem->nbins_x, .ny = gmem->nbins_y));

   OUT_PKT4(ring, REG_A6XX_VSC_PIPE_CONFIG_REG(0), max_vsc_pipes);
   for (i = 0; i < max_vsc_pipes; i++) {
      const struct fd_vsc_pipe *pipe = &gmem->vsc_pipe[i];
      OUT_RING(ring, A6XX_VSC_PIPE_CONFIG_REG_X(pipe->x) |
                        A6XX_VSC_PIPE_CONFIG_REG_Y(pipe->y) |
                        A6XX_VSC_PIPE_CONFIG_REG_W(pipe->w) |
                        A6XX_VSC_PIPE_CONFIG_REG_H(pipe->h));
   }

   OUT_REG(
      ring, A6XX_VSC_PRIM_STRM_ADDRESS(.bo = fd6_ctx->vsc_prim_strm),
      A6XX_VSC_PRIM_STRM_PITCH(.dword = fd6_ctx->vsc_prim_strm_pitch),
      A6XX_VSC_PRIM_STRM_LIMIT(.dword = fd6_ctx->vsc_prim_strm_pitch - 64));

   OUT_REG(
      ring, A6XX_VSC_DRAW_STRM_ADDRESS(.bo = fd6_ctx->vsc_draw_strm),
      A6XX_VSC_DRAW_STRM_PITCH(.dword = fd6_ctx->vsc_draw_strm_pitch),
      A6XX_VSC_DRAW_STRM_LIMIT(.dword = fd6_ctx->vsc_draw_strm_pitch - 64));
}

/*
 * If overflow is detected, either 0x1 (VSC_DRAW_STRM overflow) or 0x3
 * (VSC_PRIM_STRM overflow) plus the size of the overflowed buffer is
 * written to control->vsc_overflow.  This allows the CPU to
 * detect which buffer overflowed (and, since the current size is
 * encoded as well, this protects against already-submitted but
 * not executed batches from fooling the CPU into increasing the
 * size again unnecessarily).
 */
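/* For example: with vsc_draw_strm_pitch = 0x4000, a draw-stream overflow
 * writes 0x1 + 0x4000 = 0x4001 to vsc_overflow, which check_vsc_overflow()
 * below decodes as buffer = 0x4001 & 0x3 = 0x1 and size = 0x4001 & ~0x3 =
 * 0x4000.  (The pitches are 4-byte aligned, asserted below, so the low
 * two bits are always free for the buffer tag.)
 */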
static void
emit_vsc_overflow_test(struct fd_batch *batch)
{
   struct fd_ringbuffer *ring = batch->gmem;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct fd6_context *fd6_ctx = fd6_context(batch->ctx);

   assert((fd6_ctx->vsc_draw_strm_pitch & 0x3) == 0);
   assert((fd6_ctx->vsc_prim_strm_pitch & 0x3) == 0);

   /* Check for overflow, write vsc_scratch if detected: */
   for (int i = 0; i < gmem->num_vsc_pipes; i++) {
      OUT_PKT7(ring, CP_COND_WRITE5, 8);
      OUT_RING(ring, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
                        CP_COND_WRITE5_0_WRITE_MEMORY);
      OUT_RING(ring, CP_COND_WRITE5_1_POLL_ADDR_LO(
                        REG_A6XX_VSC_DRAW_STRM_SIZE_REG(i)));
      OUT_RING(ring, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
      OUT_RING(ring, CP_COND_WRITE5_3_REF(fd6_ctx->vsc_draw_strm_pitch - 64));
      OUT_RING(ring, CP_COND_WRITE5_4_MASK(~0));
      OUT_RELOC(ring,
                control_ptr(fd6_ctx, vsc_overflow)); /* WRITE_ADDR_LO/HI */
      OUT_RING(ring,
               CP_COND_WRITE5_7_WRITE_DATA(1 + fd6_ctx->vsc_draw_strm_pitch));

      OUT_PKT7(ring, CP_COND_WRITE5, 8);
      OUT_RING(ring, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
                        CP_COND_WRITE5_0_WRITE_MEMORY);
      OUT_RING(ring, CP_COND_WRITE5_1_POLL_ADDR_LO(
                        REG_A6XX_VSC_PRIM_STRM_SIZE_REG(i)));
      OUT_RING(ring, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
      OUT_RING(ring, CP_COND_WRITE5_3_REF(fd6_ctx->vsc_prim_strm_pitch - 64));
      OUT_RING(ring, CP_COND_WRITE5_4_MASK(~0));
      OUT_RELOC(ring,
                control_ptr(fd6_ctx, vsc_overflow)); /* WRITE_ADDR_LO/HI */
      OUT_RING(ring,
               CP_COND_WRITE5_7_WRITE_DATA(3 + fd6_ctx->vsc_prim_strm_pitch));
   }

   OUT_PKT7(ring, CP_WAIT_MEM_WRITES, 0);
}

static void
check_vsc_overflow(struct fd_context *ctx)
{
   struct fd6_context *fd6_ctx = fd6_context(ctx);
   struct fd6_control *control =
         (struct fd6_control *)fd_bo_map(fd6_ctx->control_mem);
   uint32_t vsc_overflow = control->vsc_overflow;

   if (!vsc_overflow)
      return;

   /* clear overflow flag: */
   control->vsc_overflow = 0;

   unsigned buffer = vsc_overflow & 0x3;
   unsigned size = vsc_overflow & ~0x3;

   if (buffer == 0x1) {
      /* VSC_DRAW_STRM overflow: */

      if (size < fd6_ctx->vsc_draw_strm_pitch) {
         /* we've already increased the size, this overflow is
          * from a batch submitted before resize, but executed
          * after
          */
         return;
      }

      fd_bo_del(fd6_ctx->vsc_draw_strm);
      fd6_ctx->vsc_draw_strm = NULL;
      fd6_ctx->vsc_draw_strm_pitch *= 2;

      mesa_logd("resized VSC_DRAW_STRM_PITCH to: 0x%x",
                fd6_ctx->vsc_draw_strm_pitch);

   } else if (buffer == 0x3) {
      /* VSC_PRIM_STRM overflow: */

      if (size < fd6_ctx->vsc_prim_strm_pitch) {
         /* we've already increased the size */
         return;
      }

      fd_bo_del(fd6_ctx->vsc_prim_strm);
      fd6_ctx->vsc_prim_strm = NULL;
      fd6_ctx->vsc_prim_strm_pitch *= 2;

      mesa_logd("resized VSC_PRIM_STRM_PITCH to: 0x%x",
                fd6_ctx->vsc_prim_strm_pitch);

   } else {
      /* NOTE: it's possible, for example, for overflow to corrupt the
       * control page.  I mostly just see this hit if I set initial VSC
       * buffer size extremely small.  Things still seem to recover,
       * but maybe we should pre-emptively realloc vsc_data/vsc_data2
       * and hope for different memory placement?
       */
      mesa_loge("invalid vsc_overflow value: 0x%08x", vsc_overflow);
   }
}

static void
emit_common_init(struct fd_batch *batch)
{
   struct fd_ringbuffer *ring = batch->gmem;
   struct fd_autotune *at = &batch->ctx->autotune;
   struct fd_batch_result *result = batch->autotune_result;

   if (!result)
      return;

   fd_ringbuffer_attach_bo(ring, at->results_mem);

   OUT_PKT4(ring, REG_A6XX_RB_SAMPLE_COUNT_CONTROL, 1);
   OUT_RING(ring, A6XX_RB_SAMPLE_COUNT_CONTROL_COPY);

   OUT_PKT4(ring, REG_A6XX_RB_SAMPLE_COUNT_ADDR, 2);
   OUT_RELOC(ring, results_ptr(at, result[result->idx].samples_start));

   fd6_event_write(batch, ring, ZPASS_DONE, false);
}

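/* Counterpart to emit_common_init(): together they bracket the pass with
 * ZPASS_DONE sample-count writebacks (samples_start/samples_end), which
 * the autotune heuristics use to estimate how much was actually rendered,
 * followed by a CACHE_FLUSH_TS write of the fence so the CPU can tell
 * when the results are ready.
 */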
static void
emit_common_fini(struct fd_batch *batch)
{
   struct fd_ringbuffer *ring = batch->gmem;
   struct fd_autotune *at = &batch->ctx->autotune;
   struct fd_batch_result *result = batch->autotune_result;

   fd6_emit_flushes(batch->ctx, ring, batch->barrier);

   if (!result)
      return;

   // TODO attach directly to submit:
   fd_ringbuffer_attach_bo(ring, at->results_mem);

   OUT_PKT4(ring, REG_A6XX_RB_SAMPLE_COUNT_CONTROL, 1);
   OUT_RING(ring, A6XX_RB_SAMPLE_COUNT_CONTROL_COPY);

   OUT_PKT4(ring, REG_A6XX_RB_SAMPLE_COUNT_ADDR, 2);
   OUT_RELOC(ring, results_ptr(at, result[result->idx].samples_end));

   fd6_event_write(batch, ring, ZPASS_DONE, false);

   // TODO is there a better event to use.. a single ZPASS_DONE_TS would be nice
   OUT_PKT7(ring, CP_EVENT_WRITE, 4);
   OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS));
   OUT_RELOC(ring, results_ptr(at, fence));
   OUT_RING(ring, result->fence);
}

/*
 * Emit conditional CP_INDIRECT_BRANCH based on VSC_STATE[p], ie. the IB
 * is skipped for tiles that have no visible geometry.
 *
 * If we aren't using the binning pass, this just emits a normal IB.
 */
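/* Note: each CP_INDIRECT_BUFFER packet emitted below is 4 dwords (pkt7
 * header plus address/size payload), which is where the "4 * count" in
 * both the BEGIN_RING() reservation and CP_COND_REG_EXEC_1_DWORDS
 * comes from.
 */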
static void
emit_conditional_ib(struct fd_batch *batch, const struct fd_tile *tile,
                    struct fd_ringbuffer *target)
{
   struct fd_ringbuffer *ring = batch->gmem;

   /* If we have fast clear, that won't count in the VSC state, so it
    * forces an unconditional IB (because we know there is something
    * to do for this tile)
    */
   if (batch->cleared || !use_hw_binning(batch)) {
      fd6_emit_ib(batch->gmem, target);
      return;
   }

   if (target->cur == target->start)
      return;

   emit_marker6(ring, 6);

   unsigned count = fd_ringbuffer_cmd_count(target);

   BEGIN_RING(ring, 5 + 4 * count); /* ensure conditional doesn't get split */

   OUT_PKT7(ring, CP_REG_TEST, 1);
   OUT_RING(ring, A6XX_CP_REG_TEST_0_REG(REG_A6XX_VSC_STATE_REG(tile->p)) |
                     A6XX_CP_REG_TEST_0_BIT(tile->n) |
                     A6XX_CP_REG_TEST_0_SKIP_WAIT_FOR_ME);

   OUT_PKT7(ring, CP_COND_REG_EXEC, 2);
   OUT_RING(ring, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
   OUT_RING(ring, PRED_TEST_CP_COND_REG_EXEC_1_DWORDS(4 * count));

   for (unsigned i = 0; i < count; i++) {
      uint32_t dwords;
      OUT_PKT7(ring, CP_INDIRECT_BUFFER, 3);
      dwords = fd_ringbuffer_emit_reloc_ring_full(ring, target, i) / 4;
      assert(dwords > 0);
      OUT_RING(ring, dwords);
   }

   emit_marker6(ring, 6);
}

static void
set_scissor(struct fd_ringbuffer *ring, uint32_t x1, uint32_t y1, uint32_t x2,
            uint32_t y2)
{
   OUT_REG(ring, A6XX_GRAS_SC_WINDOW_SCISSOR_TL(.x = x1, .y = y1),
           A6XX_GRAS_SC_WINDOW_SCISSOR_BR(.x = x2, .y = y2));

   OUT_REG(ring, A6XX_GRAS_2D_RESOLVE_CNTL_1(.x = x1, .y = y1),
           A6XX_GRAS_2D_RESOLVE_CNTL_2(.x = x2, .y = y2));
}

struct bin_size_params {
   enum a6xx_render_mode render_mode;
   bool force_lrz_write_dis;
   enum a6xx_buffers_location buffers_location;
   unsigned lrz_feedback_zmode_mask;
};

template <chip CHIP>
static void
set_bin_size(struct fd_ringbuffer *ring, const struct fd_gmem_stateobj *gmem,
             struct bin_size_params p)
{
   unsigned w = gmem ? gmem->bin_w : 0;
   unsigned h = gmem ? gmem->bin_h : 0;

   OUT_REG(ring, A6XX_GRAS_BIN_CONTROL(
         .binw = w, .binh = h,
         .render_mode = p.render_mode,
         .force_lrz_write_dis = p.force_lrz_write_dis,
         .buffers_location = p.buffers_location,
         .lrz_feedback_zmode_mask = p.lrz_feedback_zmode_mask,
   ));
   OUT_REG(ring, RB_BIN_CONTROL(
         CHIP,
         .binw = w, .binh = h,
         .render_mode = p.render_mode,
         .force_lrz_write_dis = p.force_lrz_write_dis,
         .buffers_location = p.buffers_location,
         .lrz_feedback_zmode_mask = p.lrz_feedback_zmode_mask,
   ));
   /* no flag for RB_BIN_CONTROL2... */
   OUT_REG(ring, A6XX_RB_BIN_CONTROL2(.binw = w, .binh = h));
}

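/* The binning pass replays the subpass draw IBs with the hardware in
 * binning mode (VFD_MODE_CNTL.RENDER_MODE = BINNING_PASS), so that the
 * VSC fills the per-pipe draw/prim visibility streams configured in
 * update_vsc_pipe().  The per-tile draw pass later consumes those streams
 * via CP_SET_BIN_DATA5 in fd6_emit_tile_prep().
 */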
static void
emit_binning_pass(struct fd_batch *batch) assert_dt
{
   struct fd_ringbuffer *ring = batch->gmem;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct fd_screen *screen = batch->ctx->screen;

   assert(!batch->tessellation);

   set_scissor(ring, 0, 0, gmem->width - 1, gmem->height - 1);

   emit_marker6(ring, 7);
   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BINNING));
   emit_marker6(ring, 7);

   OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
   OUT_RING(ring, 0x1);

   OUT_PKT7(ring, CP_SET_MODE, 1);
   OUT_RING(ring, 0x1);

   OUT_WFI5(ring);

   OUT_REG(ring, A6XX_VFD_MODE_CNTL(.render_mode = BINNING_PASS));

   update_vsc_pipe(batch);

   OUT_PKT4(ring, REG_A6XX_PC_POWER_CNTL, 1);
   OUT_RING(ring, screen->info->a6xx.magic.PC_POWER_CNTL);

   OUT_PKT4(ring, REG_A6XX_VFD_POWER_CNTL, 1);
   OUT_RING(ring, screen->info->a6xx.magic.PC_POWER_CNTL);

   OUT_PKT7(ring, CP_EVENT_WRITE, 1);
   OUT_RING(ring, UNK_2C);

   OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET, 1);
   OUT_RING(ring, A6XX_RB_WINDOW_OFFSET_X(0) | A6XX_RB_WINDOW_OFFSET_Y(0));

   OUT_PKT4(ring, REG_A6XX_SP_TP_WINDOW_OFFSET, 1);
   OUT_RING(ring,
            A6XX_SP_TP_WINDOW_OFFSET_X(0) | A6XX_SP_TP_WINDOW_OFFSET_Y(0));

   /* emit IB to binning drawcmds: */
   trace_start_binning_ib(&batch->trace, ring);
   foreach_subpass (subpass, batch) {
      emit_lrz(batch, subpass);
      fd6_emit_ib(ring, subpass->draw);
   }
   trace_end_binning_ib(&batch->trace, ring);

   OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
   OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
                     CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                     CP_SET_DRAW_STATE__0_GROUP_ID(0));
   OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   OUT_RING(ring, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   OUT_PKT7(ring, CP_EVENT_WRITE, 1);
   OUT_RING(ring, UNK_2D);

   /* This flush is probably required because the VSC, which produces the
    * visibility stream, is a client of UCHE, whereas the CP needs to read
    * the visibility stream (without caching) to do draw skipping. The
    * WFI+WAIT_FOR_ME combination guarantees that the binning commands
    * submitted are finished before reading the VSC regs (in
    * emit_vsc_overflow_test) or the VSC_DATA buffer directly (implicitly
    * as part of draws).
    */
   fd6_emit_flushes(batch->ctx, ring,
                    FD6_FLUSH_CACHE |
                    FD6_WAIT_FOR_IDLE |
                    FD6_WAIT_FOR_ME);

   trace_start_vsc_overflow_test(&batch->trace, batch->gmem);
   emit_vsc_overflow_test(batch);
   trace_end_vsc_overflow_test(&batch->trace, batch->gmem);

   OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
   OUT_RING(ring, 0x0);

   OUT_PKT7(ring, CP_SET_MODE, 1);
   OUT_RING(ring, 0x0);

   OUT_WFI5(ring);

   fd6_emit_ccu_cntl(ring, screen, true);
}

static void
emit_msaa(struct fd_ringbuffer *ring, unsigned nr)
{
   enum a3xx_msaa_samples samples = fd_msaa_samples(nr);

   OUT_PKT4(ring, REG_A6XX_SP_TP_RAS_MSAA_CNTL, 2);
   OUT_RING(ring, A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES(samples));
   OUT_RING(ring, A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES(samples) |
                     COND(samples == MSAA_ONE,
                          A6XX_SP_TP_DEST_MSAA_CNTL_MSAA_DISABLE));

   OUT_PKT4(ring, REG_A6XX_GRAS_RAS_MSAA_CNTL, 2);
   OUT_RING(ring, A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES(samples));
   OUT_RING(ring, A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES(samples) |
                     COND(samples == MSAA_ONE,
                          A6XX_GRAS_DEST_MSAA_CNTL_MSAA_DISABLE));

   OUT_PKT4(ring, REG_A6XX_RB_RAS_MSAA_CNTL, 2);
   OUT_RING(ring, A6XX_RB_RAS_MSAA_CNTL_SAMPLES(samples));
   OUT_RING(ring,
            A6XX_RB_DEST_MSAA_CNTL_SAMPLES(samples) |
               COND(samples == MSAA_ONE, A6XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE));

   OUT_PKT4(ring, REG_A6XX_RB_BLIT_GMEM_MSAA_CNTL, 1);
   OUT_RING(ring, A6XX_RB_BLIT_GMEM_MSAA_CNTL_SAMPLES(samples));
}

static void prepare_tile_setup(struct fd_batch *batch);
template <chip CHIP>
static void prepare_tile_fini(struct fd_batch *batch);

/* before first tile */
template <chip CHIP>
static void
fd6_emit_tile_init(struct fd_batch *batch) assert_dt
{
   struct fd_ringbuffer *ring = batch->gmem;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct fd_screen *screen = batch->ctx->screen;

   emit_lrz_clears<CHIP>(batch);

   fd6_emit_restore<CHIP>(batch, ring);

   fd6_emit_lrz_flush(ring);

   if (batch->prologue) {
      trace_start_prologue(&batch->trace, ring);
      fd6_emit_ib(ring, batch->prologue);
      trace_end_prologue(&batch->trace, ring);
   }

   fd6_cache_inv(batch, ring);

   prepare_tile_setup(batch);
   prepare_tile_fini<CHIP>(batch);

   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   OUT_RING(ring, 0x0);

   /* blob controls "local" in IB2, but I think that is not required */
   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_LOCAL, 1);
   OUT_RING(ring, 0x1);

   OUT_WFI5(ring);
   fd6_emit_ccu_cntl(ring, screen, true);

   emit_zs<CHIP>(ring, pfb->zsbuf, batch->gmem_state);
   emit_mrt<CHIP>(ring, pfb, batch->gmem_state);
   emit_msaa(ring, pfb->samples);
   patch_fb_read_gmem(batch);

   if (use_hw_binning(batch)) {
      /* enable stream-out during binning pass: */
      OUT_REG(ring, A6XX_VPC_SO_DISABLE(false));

      set_bin_size<CHIP>(ring, gmem, {
            .render_mode = BINNING_PASS,
            .buffers_location = BUFFERS_IN_GMEM,
            .lrz_feedback_zmode_mask = 0x6,
      });
      update_render_cntl<CHIP>(batch, pfb, true);
      emit_binning_pass(batch);

      /* and disable stream-out for draw pass: */
      OUT_REG(ring, A6XX_VPC_SO_DISABLE(true));

      /*
       * NOTE: even if we detect VSC overflow and disable use of
       * visibility stream in draw pass, it is still safe to execute
       * the rest of these cmds:
       */

      // NOTE a618 not setting .FORCE_LRZ_WRITE_DIS ..
      set_bin_size<CHIP>(ring, gmem, {
            .render_mode = RENDERING_PASS,
            .force_lrz_write_dis = true,
            .buffers_location = BUFFERS_IN_GMEM,
            .lrz_feedback_zmode_mask = 0x6,
      });

      OUT_PKT4(ring, REG_A6XX_VFD_MODE_CNTL, 1);
      OUT_RING(ring, 0x0);

      OUT_PKT4(ring, REG_A6XX_PC_POWER_CNTL, 1);
      OUT_RING(ring, screen->info->a6xx.magic.PC_POWER_CNTL);

      OUT_PKT4(ring, REG_A6XX_VFD_POWER_CNTL, 1);
      OUT_RING(ring, screen->info->a6xx.magic.PC_POWER_CNTL);

      OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
      OUT_RING(ring, 0x1);
   } else {
      /* no binning pass, so enable stream-out for draw pass: */
      OUT_REG(ring, A6XX_VPC_SO_DISABLE(false));

      set_bin_size<CHIP>(ring, gmem, {
            .render_mode = RENDERING_PASS,
            .buffers_location = BUFFERS_IN_GMEM,
            .lrz_feedback_zmode_mask = 0x6,
      });
   }

   update_render_cntl<CHIP>(batch, pfb, false);

   emit_common_init(batch);
}

template <chip CHIP>
static void
set_window_offset(struct fd_ringbuffer *ring, uint32_t x1, uint32_t y1)
{
   OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET, 1);
   OUT_RING(ring, A6XX_RB_WINDOW_OFFSET_X(x1) | A6XX_RB_WINDOW_OFFSET_Y(y1));

   OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET2, 1);
   OUT_RING(ring, A6XX_RB_WINDOW_OFFSET2_X(x1) | A6XX_RB_WINDOW_OFFSET2_Y(y1));

   OUT_REG(ring, SP_WINDOW_OFFSET(CHIP, .x = x1, .y = y1));

   OUT_PKT4(ring, REG_A6XX_SP_TP_WINDOW_OFFSET, 1);
   OUT_RING(ring,
            A6XX_SP_TP_WINDOW_OFFSET_X(x1) | A6XX_SP_TP_WINDOW_OFFSET_Y(y1));
}

/* before mem2gmem */
template <chip CHIP>
static void
fd6_emit_tile_prep(struct fd_batch *batch, const struct fd_tile *tile)
{
   struct fd_context *ctx = batch->ctx;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct fd6_context *fd6_ctx = fd6_context(ctx);
   struct fd_ringbuffer *ring = batch->gmem;

   emit_marker6(ring, 7);
   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM));
   emit_marker6(ring, 7);

   uint32_t x1 = tile->xoff;
   uint32_t y1 = tile->yoff;
   uint32_t x2 = tile->xoff + tile->bin_w - 1;
   uint32_t y2 = tile->yoff + tile->bin_h - 1;

   set_scissor(ring, x1, y1, x2, y2);

   if (use_hw_binning(batch)) {
      const struct fd_vsc_pipe *pipe = &gmem->vsc_pipe[tile->p];
      unsigned num_vsc_pipes = ctx->screen->info->num_vsc_pipes;

      OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);

      OUT_PKT7(ring, CP_SET_MODE, 1);
      OUT_RING(ring, 0x0);

      OUT_PKT7(ring, CP_SET_BIN_DATA5, 7);
      OUT_RING(ring, CP_SET_BIN_DATA5_0_VSC_SIZE(pipe->w * pipe->h) |
                        CP_SET_BIN_DATA5_0_VSC_N(tile->n));
      OUT_RELOC(ring, fd6_ctx->vsc_draw_strm, /* per-pipe draw-stream address */
                (tile->p * fd6_ctx->vsc_draw_strm_pitch), 0, 0);
      OUT_RELOC(
         ring, fd6_ctx->vsc_draw_strm, /* VSC_DRAW_STRM_ADDRESS + (p * 4) */
         (tile->p * 4) + (num_vsc_pipes * fd6_ctx->vsc_draw_strm_pitch),
         0, 0);
      OUT_RELOC(ring, fd6_ctx->vsc_prim_strm,
                (tile->p * fd6_ctx->vsc_prim_strm_pitch), 0, 0);

      OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
      OUT_RING(ring, 0x0);

      set_window_offset<CHIP>(ring, x1, y1);

      set_bin_size<CHIP>(ring, gmem, {
            .render_mode = RENDERING_PASS,
            .buffers_location = BUFFERS_IN_GMEM,
            .lrz_feedback_zmode_mask = 0x6,
      });

      OUT_PKT7(ring, CP_SET_MODE, 1);
      OUT_RING(ring, 0x0);
   } else {
      set_window_offset<CHIP>(ring, x1, y1);

      OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
      OUT_RING(ring, 0x1);

      OUT_PKT7(ring, CP_SET_MODE, 1);
      OUT_RING(ring, 0x0);
   }
}

static void
set_blit_scissor(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
   const struct pipe_framebuffer_state *pfb = &batch->framebuffer;

   struct pipe_scissor_state blit_scissor;

   blit_scissor.minx = 0;
   blit_scissor.miny = 0;
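   /* Round the blit scissor out, presumably to match the resolve
    * blitter's minimum granularity of 16x4 pixels:
    */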
   blit_scissor.maxx = ALIGN(pfb->width, 16);
   blit_scissor.maxy = ALIGN(pfb->height, 4);

   OUT_PKT4(ring, REG_A6XX_RB_BLIT_SCISSOR_TL, 2);
   OUT_RING(ring, A6XX_RB_BLIT_SCISSOR_TL_X(blit_scissor.minx) |
                     A6XX_RB_BLIT_SCISSOR_TL_Y(blit_scissor.miny));
   OUT_RING(ring, A6XX_RB_BLIT_SCISSOR_BR_X(blit_scissor.maxx - 1) |
                     A6XX_RB_BLIT_SCISSOR_BR_Y(blit_scissor.maxy - 1));
}

static void
emit_blit(struct fd_batch *batch, struct fd_ringbuffer *ring, uint32_t base,
          struct pipe_surface *psurf, bool stencil)
{
   struct fd_resource *rsc = fd_resource(psurf->texture);
   enum pipe_format pfmt = psurf->format;
   uint32_t offset;
   bool ubwc_enabled;

   assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);

   /* separate stencil case: */
   if (stencil) {
      rsc = rsc->stencil;
      pfmt = rsc->b.b.format;
   }

   offset =
      fd_resource_offset(rsc, psurf->u.tex.level, psurf->u.tex.first_layer);
   ubwc_enabled = fd_resource_ubwc_enabled(rsc, psurf->u.tex.level);

   enum a6xx_tile_mode tile_mode = (enum a6xx_tile_mode)
         fd_resource_tile_mode(&rsc->b.b, psurf->u.tex.level);
   enum a6xx_format format = fd6_color_format(pfmt, tile_mode);
   uint32_t stride = fd_resource_pitch(rsc, psurf->u.tex.level);
   uint32_t array_stride = fd_resource_layer_stride(rsc, psurf->u.tex.level);
   enum a3xx_color_swap swap =
         fd6_color_swap(pfmt, (enum a6xx_tile_mode)rsc->layout.tile_mode);
   enum a3xx_msaa_samples samples = fd_msaa_samples(rsc->b.b.nr_samples);

   OUT_REG(ring,
           A6XX_RB_BLIT_DST_INFO(
                 .tile_mode = tile_mode,
                 .flags = ubwc_enabled,
                 .samples = samples,
                 .color_swap = swap,
                 .color_format = format,
           ),
           A6XX_RB_BLIT_DST(.bo = rsc->bo, .bo_offset = offset),
           A6XX_RB_BLIT_DST_PITCH(stride),
           A6XX_RB_BLIT_DST_ARRAY_PITCH(array_stride));

   OUT_REG(ring, A6XX_RB_BLIT_BASE_GMEM(.dword = base));

   if (ubwc_enabled) {
      OUT_PKT4(ring, REG_A6XX_RB_BLIT_FLAG_DST, 3);
      fd6_emit_flag_reference(ring, rsc, psurf->u.tex.level,
                              psurf->u.tex.first_layer);
   }

   fd6_emit_blit(batch, ring);
}

static void
emit_restore_blit(struct fd_batch *batch, struct fd_ringbuffer *ring,
                  uint32_t base, struct pipe_surface *psurf, unsigned buffer)
{
   bool stencil = (buffer == FD_BUFFER_STENCIL);

   OUT_REG(ring,
           A6XX_RB_BLIT_INFO(
                 .unk0 = true,
                 .gmem = true,
                 .sample_0 = util_format_is_pure_integer(psurf->format),
                 .depth = (buffer == FD_BUFFER_DEPTH),
           ),
   );

   emit_blit(batch, ring, base, psurf, stencil);
}

static void
emit_subpass_clears(struct fd_batch *batch, struct fd_batch_subpass *subpass)
{
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct fd_ringbuffer *ring = subpass->subpass_clears;
   enum a3xx_msaa_samples samples = fd_msaa_samples(pfb->samples);

   uint32_t buffers = subpass->fast_cleared;

   if (buffers & PIPE_CLEAR_COLOR) {

      for (int i = 0; i < pfb->nr_cbufs; i++) {
         union pipe_color_union *color = &subpass->clear_color[i];
         union util_color uc = {0};

         if (!pfb->cbufs[i])
            continue;

         if (!(buffers & (PIPE_CLEAR_COLOR0 << i)))
            continue;

         enum pipe_format pfmt = pfb->cbufs[i]->format;

         // XXX I think RB_CLEAR_COLOR_DWn wants to take into account SWAP??
         union pipe_color_union swapped;
         switch (fd6_color_swap(pfmt, TILE6_LINEAR)) {
         case WZYX:
            swapped.ui[0] = color->ui[0];
            swapped.ui[1] = color->ui[1];
            swapped.ui[2] = color->ui[2];
            swapped.ui[3] = color->ui[3];
            break;
         case WXYZ:
            swapped.ui[2] = color->ui[0];
            swapped.ui[1] = color->ui[1];
            swapped.ui[0] = color->ui[2];
            swapped.ui[3] = color->ui[3];
            break;
         case ZYXW:
            swapped.ui[3] = color->ui[0];
            swapped.ui[0] = color->ui[1];
            swapped.ui[1] = color->ui[2];
            swapped.ui[2] = color->ui[3];
            break;
         case XYZW:
            swapped.ui[3] = color->ui[0];
            swapped.ui[2] = color->ui[1];
            swapped.ui[1] = color->ui[2];
            swapped.ui[0] = color->ui[3];
            break;
         }

         util_pack_color_union(pfmt, &uc, &swapped);

         OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
         OUT_RING(ring,
                  A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
                     A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
                     A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(fd6_color_format(pfmt, TILE6_LINEAR)));

         OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
         OUT_RING(ring,
                  A6XX_RB_BLIT_INFO_GMEM | A6XX_RB_BLIT_INFO_CLEAR_MASK(0xf));

         OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
         OUT_RING(ring, gmem->cbuf_base[i]);

         OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
         OUT_RING(ring, 0);

         OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 4);
         OUT_RING(ring, uc.ui[0]);
         OUT_RING(ring, uc.ui[1]);
         OUT_RING(ring, uc.ui[2]);
         OUT_RING(ring, uc.ui[3]);

         fd6_emit_blit(batch, ring);
      }
   }

   const bool has_depth = pfb->zsbuf;
   const bool has_separate_stencil =
      has_depth && fd_resource(pfb->zsbuf->texture)->stencil;

   /* First clear depth or combined depth/stencil. */
   if ((has_depth && (buffers & PIPE_CLEAR_DEPTH)) ||
       (!has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL))) {
      enum pipe_format pfmt = pfb->zsbuf->format;
      uint32_t clear_value;
      uint32_t mask = 0;

      if (has_separate_stencil) {
         pfmt = util_format_get_depth_only(pfb->zsbuf->format);
         clear_value = util_pack_z(pfmt, subpass->clear_depth);
      } else {
         pfmt = pfb->zsbuf->format;
         clear_value =
            util_pack_z_stencil(pfmt, subpass->clear_depth, subpass->clear_stencil);
      }

      if (buffers & PIPE_CLEAR_DEPTH)
         mask |= 0x1;

      if (!has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL))
         mask |= 0x2;

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
      OUT_RING(ring,
               A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
                  A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
                  A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(fd6_color_format(pfmt, TILE6_LINEAR)));

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
      OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
                        // XXX UNK0 for separate stencil ??
                        A6XX_RB_BLIT_INFO_DEPTH |
                        A6XX_RB_BLIT_INFO_CLEAR_MASK(mask));

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
      OUT_RING(ring, gmem->zsbuf_base[0]);

      OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
      OUT_RING(ring, 0);

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 1);
      OUT_RING(ring, clear_value);

      fd6_emit_blit(batch, ring);
   }

   /* Then clear the separate stencil buffer in case of 32 bit depth
    * formats with separate stencil. */
   if (has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL)) {
      OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
      OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
                        A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
                        A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(FMT6_8_UINT));

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
      OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
                        // A6XX_RB_BLIT_INFO_UNK0 |
                        A6XX_RB_BLIT_INFO_DEPTH |
                        A6XX_RB_BLIT_INFO_CLEAR_MASK(0x1));

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
      OUT_RING(ring, gmem->zsbuf_base[1]);

      OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
      OUT_RING(ring, 0);

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 1);
      OUT_RING(ring, subpass->clear_stencil & 0xff);

      fd6_emit_blit(batch, ring);
   }
}
1407 
1408 /*
1409  * transfer from system memory to gmem
1410  */
1411 static void
emit_restore_blits(struct fd_batch * batch,struct fd_ringbuffer * ring)1412 emit_restore_blits(struct fd_batch *batch, struct fd_ringbuffer *ring)
1413 {
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;

   if (batch->restore & FD_BUFFER_COLOR) {
      unsigned i;
      for (i = 0; i < pfb->nr_cbufs; i++) {
         if (!pfb->cbufs[i])
            continue;
         if (!(batch->restore & (PIPE_CLEAR_COLOR0 << i)))
            continue;
         emit_restore_blit(batch, ring, gmem->cbuf_base[i], pfb->cbufs[i],
                           FD_BUFFER_COLOR);
      }
   }

   if (batch->restore & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
      struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);

      if (!rsc->stencil || (batch->restore & FD_BUFFER_DEPTH)) {
         emit_restore_blit(batch, ring, gmem->zsbuf_base[0], pfb->zsbuf,
                           FD_BUFFER_DEPTH);
      }
      if (rsc->stencil && (batch->restore & FD_BUFFER_STENCIL)) {
         emit_restore_blit(batch, ring, gmem->zsbuf_base[1], pfb->zsbuf,
                           FD_BUFFER_STENCIL);
      }
   }
}
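
/* Illustrative note: batch->restore is a bitmask in the PIPE_CLEAR_*
 * layout, so a batch that needs color attachment 1 plus depth restored
 * would have (PIPE_CLEAR_COLOR0 << 1) | FD_BUFFER_DEPTH set, and the
 * loops above emit one restore blit per set bit.
 */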

static void
prepare_tile_setup(struct fd_batch *batch)
{
   if (batch->restore) {
      batch->tile_loads =
         fd_submit_new_ringbuffer(batch->submit, 0x1000, FD_RINGBUFFER_STREAMING);

      set_blit_scissor(batch, batch->tile_loads);
      emit_restore_blits(batch, batch->tile_loads);
   }

   foreach_subpass (subpass, batch) {
      if (!subpass->fast_cleared)
         continue;

      subpass->subpass_clears =
         fd_submit_new_ringbuffer(batch->submit, 0x1000, FD_RINGBUFFER_STREAMING);

      set_blit_scissor(batch, subpass->subpass_clears);
      emit_subpass_clears(batch, subpass);
   }
}
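
/* The rings built here are not executed immediately; they are attached to
 * the batch and replayed for each tile via emit_conditional_ib() (see
 * fd6_emit_tile_renderprep() and fd6_emit_tile() below), so the loads and
 * clears can be skipped for tiles where they are not needed.
 */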

/*
 * transfer from system memory to gmem
 */
static void
fd6_emit_tile_mem2gmem(struct fd_batch *batch, const struct fd_tile *tile)
{
}
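
/* Intentionally empty: the actual sysmem-to-GMEM loads were already
 * recorded into batch->tile_loads by prepare_tile_setup(), and are
 * replayed per tile from fd6_emit_tile_renderprep() below.
 */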

/* before IB to rendering cmds: */
static void
fd6_emit_tile_renderprep(struct fd_batch *batch, const struct fd_tile *tile)
{
   if (batch->tile_loads) {
      trace_start_tile_loads(&batch->trace, batch->gmem, batch->restore);
      emit_conditional_ib(batch, tile, batch->tile_loads);
      trace_end_tile_loads(&batch->trace, batch->gmem);
   }
}

static bool
blit_can_resolve(enum pipe_format format)
{
   const struct util_format_description *desc = util_format_description(format);

   /* The blit event can only do resolve for simple cases: averaging
    * samples as unsigned integers, or choosing only one sample.
    */
   if (util_format_is_snorm(format) || util_format_is_srgb(format))
      return false;

   /* Can't do formats with larger channel sizes.
    * Note: this includes all float formats.
    * Note2: single-channel integer formats seem OK.
    */
   if (desc->channel[0].size > 10)
      return false;

   switch (format) {
   /* For unknown reasons the blit event can't MSAA-resolve these formats
    * when tiled, likely related to these formats having a different layout
    * from other cpp=2 formats.
    */
   case PIPE_FORMAT_R8G8_UNORM:
   case PIPE_FORMAT_R8G8_UINT:
   case PIPE_FORMAT_R8G8_SINT:
   case PIPE_FORMAT_R8G8_SRGB:
   /* TODO: this one should be able to work? */
   case PIPE_FORMAT_Z24_UNORM_S8_UINT:
      return false;
   default:
      break;
   }

   return true;
}
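
/* For example, based on the checks above: PIPE_FORMAT_R8G8B8A8_UNORM and
 * PIPE_FORMAT_B10G10R10A2_UNORM resolve fine with the BLIT event, while
 * PIPE_FORMAT_R16G16B16A16_FLOAT (channel size > 10) and
 * PIPE_FORMAT_R8G8B8A8_SRGB must fall back to the CP_BLIT path.
 */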

static bool
needs_resolve(struct pipe_surface *psurf)
{
   return psurf->nr_samples &&
          (psurf->nr_samples != psurf->texture->nr_samples);
}
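
/* I.e. the surface was created with a sample count different from its
 * texture (such as a single-sample surface wrapping an MSAA texture),
 * which is how gallium expresses a resolve-on-store attachment.
 */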

/**
 * Returns the UNKNOWN_8C01 value for handling partial depth/stencil
 * clear/stores to Z24S8.
 */
static uint32_t
fd6_unknown_8c01(enum pipe_format format, unsigned buffers)
{
   if (format == PIPE_FORMAT_Z24_UNORM_S8_UINT) {
      if (buffers == FD_BUFFER_DEPTH)
         return 0x08000041;
      else if (buffers == FD_BUFFER_STENCIL)
         return 0x00084001;
   }
   return 0;
}
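
/* The magic values are not documented (presumably derived from blob
 * traces).  They are threaded through to the 2D blit path; e.g.
 * emit_sysmem_clears() below does:
 *
 *    fd6_clear_surface<CHIP>(ctx, ring, pfb->zsbuf, &box2d, &value,
 *                            fd6_unknown_8c01(pfb->zsbuf->format, buffers));
 *
 * so that a partial clear of packed Z24S8 preserves the other channel.
 */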

template <chip CHIP>
static void
emit_resolve_blit(struct fd_batch *batch, struct fd_ringbuffer *ring,
                  uint32_t base, struct pipe_surface *psurf,
                  unsigned buffer) assert_dt
{
   uint32_t info = 0;
   bool stencil = false;

   if (!fd_resource(psurf->texture)->valid)
      return;

   /* If we need to resolve, but cannot with the BLIT event, we instead need
    * to generate per-tile CP_BLIT (r2d) commands.
    *
    * Separate-stencil is a special case: we might need to use CP_BLIT for
    * depth, but we can still resolve stencil with a BLIT event.
    */
   if (needs_resolve(psurf) && !blit_can_resolve(psurf->format) &&
       (buffer != FD_BUFFER_STENCIL)) {
      /* We could potentially use fd6_unknown_8c01() to handle partial z/s
       * resolve to packed z/s, but we would need a corresponding ability in the
       * !resolve case below, so batch_draw_tracking_for_dirty_bits() has us
       * just do a restore of the other channel for partial packed z/s writes.
       */
      fd6_resolve_tile<CHIP>(batch, ring, base, psurf, 0);
      return;
   }

   switch (buffer) {
   case FD_BUFFER_COLOR:
      break;
   case FD_BUFFER_STENCIL:
      info |= A6XX_RB_BLIT_INFO_UNK0;
      stencil = true;
      break;
   case FD_BUFFER_DEPTH:
      info |= A6XX_RB_BLIT_INFO_DEPTH;
      break;
   }

   if (util_format_is_pure_integer(psurf->format) ||
       util_format_is_depth_or_stencil(psurf->format))
      info |= A6XX_RB_BLIT_INFO_SAMPLE_0;

   OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
   OUT_RING(ring, info);

   emit_blit(batch, ring, base, psurf, stencil);
}
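
/* Summary of the fallback logic above: BLIT-event resolves run directly
 * from GMEM and are cheap, but limited to the formats accepted by
 * blit_can_resolve(); everything else goes through fd6_resolve_tile(),
 * which emits per-tile CP_BLIT (r2d) commands instead.
 */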

/*
 * transfer from gmem to system memory (i.e. normal RAM)
 */

template <chip CHIP>
static void
prepare_tile_fini(struct fd_batch *batch)
   assert_dt
{
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   struct fd_ringbuffer *ring;

   batch->tile_store =
      fd_submit_new_ringbuffer(batch->submit, 0x1000, FD_RINGBUFFER_STREAMING);
   ring = batch->tile_store;

   set_blit_scissor(batch, ring);

   if (batch->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
      struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);

      if (!rsc->stencil || (batch->resolve & FD_BUFFER_DEPTH)) {
         emit_resolve_blit<CHIP>(batch, ring, gmem->zsbuf_base[0],
                                 pfb->zsbuf, FD_BUFFER_DEPTH);
      }
      if (rsc->stencil && (batch->resolve & FD_BUFFER_STENCIL)) {
         emit_resolve_blit<CHIP>(batch, ring, gmem->zsbuf_base[1],
                                 pfb->zsbuf, FD_BUFFER_STENCIL);
      }
   }

   if (batch->resolve & FD_BUFFER_COLOR) {
      unsigned i;
      for (i = 0; i < pfb->nr_cbufs; i++) {
         if (!pfb->cbufs[i])
            continue;
         if (!(batch->resolve & (PIPE_CLEAR_COLOR0 << i)))
            continue;
         emit_resolve_blit<CHIP>(batch, ring, gmem->cbuf_base[i],
                                 pfb->cbufs[i], FD_BUFFER_COLOR);
      }
   }
}
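
/* As with the loads, the store commands recorded here are replayed once
 * per tile via emit_conditional_ib() (from fd6_emit_tile_gmem2mem()
 * below), writing each GMEM tile back out to the resolved buffers.
 */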

static void
fd6_emit_tile(struct fd_batch *batch, const struct fd_tile *tile)
{
   foreach_subpass (subpass, batch) {
      if (subpass->subpass_clears) {
         trace_start_clears(&batch->trace, batch->gmem, subpass->fast_cleared);
         emit_conditional_ib(batch, tile, subpass->subpass_clears);
         trace_end_clears(&batch->trace, batch->gmem);
      }

      emit_lrz(batch, subpass);

      fd6_emit_ib(batch->gmem, subpass->draw);
   }

   if (batch->tile_epilogue)
      fd6_emit_ib(batch->gmem, batch->tile_epilogue);
}

static void
fd6_emit_tile_gmem2mem(struct fd_batch *batch, const struct fd_tile *tile)
{
   struct fd_ringbuffer *ring = batch->gmem;

   if (batch->epilogue)
      fd6_emit_ib(batch->gmem, batch->epilogue);

   if (use_hw_binning(batch)) {
      OUT_PKT7(ring, CP_SET_MARKER, 1);
      OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_ENDVIS));
   }

   OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
   OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
                     CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                     CP_SET_DRAW_STATE__0_GROUP_ID(0));
   OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   OUT_RING(ring, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_LOCAL, 1);
   OUT_RING(ring, 0x0);

   emit_marker6(ring, 7);
   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE));
   emit_marker6(ring, 7);

   if (batch->tile_store) {
      trace_start_tile_stores(&batch->trace, batch->gmem, batch->resolve);
      emit_conditional_ib(batch, tile, batch->tile_store);
      trace_end_tile_stores(&batch->trace, batch->gmem);
   }
}
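
/* Note the ordering above: draw state groups are disabled and the marker
 * switched to RM6_RESOLVE before batch->tile_store runs, since the store
 * blits execute as part of the resolve pass rather than the rendering
 * pass.
 */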

static void
fd6_emit_tile_fini(struct fd_batch *batch)
{
   struct fd_ringbuffer *ring = batch->gmem;

   emit_common_fini(batch);

   OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_CNTL, 1);
   OUT_RING(ring, A6XX_GRAS_LRZ_CNTL_ENABLE);

   fd6_emit_lrz_flush(ring);

   fd6_event_write(batch, ring, PC_CCU_RESOLVE_TS, true);

   if (use_hw_binning(batch)) {
      check_vsc_overflow(batch->ctx);
   }
}
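
/* check_vsc_overflow() reads back the visibility stream (VSC) status to
 * detect whether the binning pass overflowed its buffers; see its
 * definition for the exact recovery, which (as an assumption here) grows
 * the buffers for subsequent batches.
 */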

template <chip CHIP>
static void
emit_sysmem_clears(struct fd_batch *batch, struct fd_batch_subpass *subpass)
   assert_dt
{
   struct fd_context *ctx = batch->ctx;
   struct fd_ringbuffer *ring = batch->gmem;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;

   uint32_t buffers = subpass->fast_cleared;

   if (!buffers)
      return;

   struct pipe_box box2d;
   u_box_2d(0, 0, pfb->width, pfb->height, &box2d);

   trace_start_clears(&batch->trace, ring, buffers);

   if (buffers & PIPE_CLEAR_COLOR) {
      for (int i = 0; i < pfb->nr_cbufs; i++) {
         union pipe_color_union color = subpass->clear_color[i];

         if (!pfb->cbufs[i])
            continue;

         if (!(buffers & (PIPE_CLEAR_COLOR0 << i)))
            continue;

         fd6_clear_surface<CHIP>(ctx, ring, pfb->cbufs[i], &box2d, &color, 0);
      }
   }
   if (buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) {
      union pipe_color_union value = {};

      const bool has_depth = pfb->zsbuf;
      struct pipe_resource *separate_stencil =
         has_depth && fd_resource(pfb->zsbuf->texture)->stencil
            ? &fd_resource(pfb->zsbuf->texture)->stencil->b.b
            : NULL;

      if ((buffers & PIPE_CLEAR_DEPTH) || (!separate_stencil && (buffers & PIPE_CLEAR_STENCIL))) {
         value.f[0] = subpass->clear_depth;
         value.ui[1] = subpass->clear_stencil;
         fd6_clear_surface<CHIP>(ctx, ring, pfb->zsbuf, &box2d,
                                 &value, fd6_unknown_8c01(pfb->zsbuf->format, buffers));
      }

      if (separate_stencil && (buffers & PIPE_CLEAR_STENCIL)) {
         value.ui[0] = subpass->clear_stencil;

         struct pipe_surface stencil_surf = *pfb->zsbuf;
         stencil_surf.format = PIPE_FORMAT_S8_UINT;
         stencil_surf.texture = separate_stencil;

         fd6_clear_surface<CHIP>(ctx, ring, &stencil_surf, &box2d, &value, 0);
      }
   }

   fd6_emit_flushes(ctx, ring, FD6_FLUSH_CCU_COLOR);

   trace_end_clears(&batch->trace, ring);
}
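
/* A rough sketch of the result, assuming a framebuffer with one cbuf and
 * packed z/s: a subpass with fast_cleared = PIPE_CLEAR_COLOR0 |
 * PIPE_CLEAR_DEPTH becomes two fd6_clear_surface() blits covering the
 * full pfb->width x pfb->height box, followed by a CCU color flush.
 */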

template <chip CHIP>
static void
fd6_emit_sysmem_prep(struct fd_batch *batch) assert_dt
{
   struct fd_ringbuffer *ring = batch->gmem;

   emit_lrz_clears<CHIP>(batch);

   fd6_emit_restore<CHIP>(batch, ring);
   fd6_emit_lrz_flush(ring);

   if (batch->prologue) {
      if (!batch->nondraw) {
         trace_start_prologue(&batch->trace, ring);
      }
      fd6_emit_ib(ring, batch->prologue);
      if (!batch->nondraw) {
         trace_end_prologue(&batch->trace, ring);
      }
   }

   /* remaining setup below here does not apply to blit/compute: */
   if (batch->nondraw)
      return;

   struct pipe_framebuffer_state *pfb = &batch->framebuffer;

   if (pfb->width > 0 && pfb->height > 0)
      set_scissor(ring, 0, 0, pfb->width - 1, pfb->height - 1);
   else
      set_scissor(ring, 0, 0, 0, 0);

   set_window_offset<CHIP>(ring, 0, 0);

   set_bin_size<CHIP>(ring, NULL, {
         .render_mode = RENDERING_PASS,
         .buffers_location = BUFFERS_IN_SYSMEM,
   });

   emit_marker6(ring, 7);
   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));
   emit_marker6(ring, 7);

   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   OUT_RING(ring, 0x0);

   /* blob controls "local" in IB2, but I think that is not required */
   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_LOCAL, 1);
   OUT_RING(ring, 0x1);

   /* enable stream-out, with sysmem there is only one pass: */
   OUT_REG(ring, A6XX_VPC_SO_DISABLE(false));

   OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
   OUT_RING(ring, 0x1);

   emit_zs<CHIP>(ring, pfb->zsbuf, NULL);
   emit_mrt<CHIP>(ring, pfb, NULL);
   emit_msaa(ring, pfb->samples);
   patch_fb_read_sysmem(batch);

   emit_common_init(batch);
}
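
/* In sysmem (bypass) mode there is no per-tile pass: the scissor covers
 * the whole framebuffer, the window offset is zero, and set_bin_size()
 * is programmed with BUFFERS_IN_SYSMEM so the render targets are
 * accessed directly in memory instead of through GMEM.
 */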

template <chip CHIP>
static void
fd6_emit_sysmem(struct fd_batch *batch)
   assert_dt
{
   struct fd_ringbuffer *ring = batch->gmem;
   struct fd_screen *screen = batch->ctx->screen;

   foreach_subpass (subpass, batch) {
      if (subpass->fast_cleared) {
         unsigned flushes = 0;
         if (subpass->fast_cleared & FD_BUFFER_COLOR)
            flushes |= FD6_INVALIDATE_CCU_COLOR;
         if (subpass->fast_cleared & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL))
            flushes |= FD6_INVALIDATE_CCU_DEPTH;

         fd6_emit_flushes(batch->ctx, ring, flushes);
         emit_sysmem_clears<CHIP>(batch, subpass);
      }

      OUT_WFI5(ring);
      fd6_emit_ccu_cntl(ring, screen, false);

      struct pipe_framebuffer_state *pfb = &batch->framebuffer;
      update_render_cntl<CHIP>(batch, pfb, false);

      emit_lrz(batch, subpass);

      fd6_emit_ib(ring, subpass->draw);
   }
}
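
/* Unlike the GMEM path, each subpass here is emitted unconditionally and
 * in order; the CCU invalidates before emit_sysmem_clears() presumably
 * ensure the blit-based clears are not overwritten by stale cache
 * contents from an earlier pass.
 */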

static void
fd6_emit_sysmem_fini(struct fd_batch *batch) assert_dt
{
   struct fd_ringbuffer *ring = batch->gmem;

   emit_common_fini(batch);

   if (batch->tile_epilogue)
      fd6_emit_ib(batch->gmem, batch->tile_epilogue);

   if (batch->epilogue)
      fd6_emit_ib(batch->gmem, batch->epilogue);

   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   OUT_RING(ring, 0x0);

   fd6_emit_lrz_flush(ring);

   fd6_emit_flushes(batch->ctx, ring,
                    FD6_FLUSH_CCU_COLOR |
                    FD6_FLUSH_CCU_DEPTH);
}

template <chip CHIP>
void
fd6_gmem_init(struct pipe_context *pctx)
   disable_thread_safety_analysis
{
   struct fd_context *ctx = fd_context(pctx);

   ctx->emit_tile_init = fd6_emit_tile_init<CHIP>;
   ctx->emit_tile_prep = fd6_emit_tile_prep<CHIP>;
   ctx->emit_tile_mem2gmem = fd6_emit_tile_mem2gmem;
   ctx->emit_tile_renderprep = fd6_emit_tile_renderprep;
   ctx->emit_tile = fd6_emit_tile;
   ctx->emit_tile_gmem2mem = fd6_emit_tile_gmem2mem;
   ctx->emit_tile_fini = fd6_emit_tile_fini;
   ctx->emit_sysmem_prep = fd6_emit_sysmem_prep<CHIP>;
   ctx->emit_sysmem = fd6_emit_sysmem<CHIP>;
   ctx->emit_sysmem_fini = fd6_emit_sysmem_fini;
}

/* Teach the compiler about needed variants: */
template void fd6_gmem_init<A6XX>(struct pipe_context *pctx);
template void fd6_gmem_init<A7XX>(struct pipe_context *pctx);
1920