/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 * Copyright © 2018 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include <stdio.h>

#include "pipe/p_state.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/format/u_format.h"

#include "freedreno_draw.h"
#include "freedreno_log.h"
#include "freedreno_state.h"
#include "freedreno_resource.h"

#include "fd6_blitter.h"
#include "fd6_gmem.h"
#include "fd6_context.h"
#include "fd6_draw.h"
#include "fd6_emit.h"
#include "fd6_program.h"
#include "fd6_format.h"
#include "fd6_resource.h"
#include "fd6_zsa.h"
#include "fd6_pack.h"

/**
 * Emits the flags registers, suitable for RB_MRT_FLAG_BUFFER,
 * RB_DEPTH_FLAG_BUFFER, SP_PS_2D_SRC_FLAGS, and RB_BLIT_FLAG_DST.
 */
void
fd6_emit_flag_reference(struct fd_ringbuffer *ring, struct fd_resource *rsc,
      int level, int layer)
{
   if (fd_resource_ubwc_enabled(rsc, level)) {
      OUT_RELOC(ring, rsc->bo, fd_resource_ubwc_offset(rsc, level, layer), 0, 0);
      OUT_RING(ring,
            A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH(fdl_ubwc_pitch(&rsc->layout, level)) |
            A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH(rsc->layout.ubwc_layer_size >> 2));
   } else {
      OUT_RING(ring, 0x00000000);    /* RB_MRT_FLAG_BUFFER[i].ADDR_LO */
      OUT_RING(ring, 0x00000000);    /* RB_MRT_FLAG_BUFFER[i].ADDR_HI */
      OUT_RING(ring, 0x00000000);
   }
}

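/*
 * Emit per-MRT color buffer state: format, tile mode, swap, pitch and base
 * address for each bound cbuf (plus its base in GMEM when tiling), the
 * matching UBWC flag-buffer reference, and the SP/RB render-component masks.
 */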
static void
emit_mrt(struct fd_ringbuffer *ring, struct pipe_framebuffer_state *pfb,
      const struct fd_gmem_stateobj *gmem)
{
   unsigned char mrt_comp[A6XX_MAX_RENDER_TARGETS] = {0};
   unsigned srgb_cntl = 0;
   unsigned i;

   unsigned max_layer_index = 0;

   for (i = 0; i < pfb->nr_cbufs; i++) {
      enum a6xx_format format = 0;
      enum a3xx_color_swap swap = WZYX;
      bool sint = false, uint = false;
      struct fd_resource *rsc = NULL;
      struct fdl_slice *slice = NULL;
      uint32_t stride = 0;
      uint32_t offset;
      uint32_t tile_mode;

      if (!pfb->cbufs[i])
         continue;

      mrt_comp[i] = 0xf;

      struct pipe_surface *psurf = pfb->cbufs[i];
      enum pipe_format pformat = psurf->format;
      rsc = fd_resource(psurf->texture);
      if (!rsc->bo)
         continue;

      uint32_t base = gmem ? gmem->cbuf_base[i] : 0;
      slice = fd_resource_slice(rsc, psurf->u.tex.level);
      format = fd6_pipe2color(pformat);
      sint = util_format_is_pure_sint(pformat);
      uint = util_format_is_pure_uint(pformat);

      if (util_format_is_srgb(pformat))
         srgb_cntl |= (1 << i);

      offset = fd_resource_offset(rsc, psurf->u.tex.level,
            psurf->u.tex.first_layer);

      stride = fd_resource_pitch(rsc, psurf->u.tex.level);
      swap = fd6_resource_swap(rsc, pformat);

      tile_mode = fd_resource_tile_mode(psurf->texture, psurf->u.tex.level);
      max_layer_index = psurf->u.tex.last_layer - psurf->u.tex.first_layer;

      debug_assert((offset + slice->size0) <= fd_bo_size(rsc->bo));

      OUT_REG(ring,
            A6XX_RB_MRT_BUF_INFO(i,
                  .color_format = format,
                  .color_tile_mode = tile_mode,
                  .color_swap = swap),
            A6XX_RB_MRT_PITCH(i, .a6xx_rb_mrt_pitch = stride),
            A6XX_RB_MRT_ARRAY_PITCH(i, .a6xx_rb_mrt_array_pitch = slice->size0),
            A6XX_RB_MRT_BASE(i, .bo = rsc->bo, .bo_offset = offset),
            A6XX_RB_MRT_BASE_GMEM(i, .unknown = base));

      OUT_REG(ring,
            A6XX_SP_FS_MRT_REG(i, .color_format = format,
                  .color_sint = sint, .color_uint = uint));

      OUT_PKT4(ring, REG_A6XX_RB_MRT_FLAG_BUFFER(i), 3);
      fd6_emit_flag_reference(ring, rsc,
            psurf->u.tex.level, psurf->u.tex.first_layer);
   }

   OUT_REG(ring, A6XX_RB_SRGB_CNTL(.dword = srgb_cntl));
   OUT_REG(ring, A6XX_SP_SRGB_CNTL(.dword = srgb_cntl));

   OUT_REG(ring, A6XX_RB_RENDER_COMPONENTS(
         .rt0 = mrt_comp[0],
         .rt1 = mrt_comp[1],
         .rt2 = mrt_comp[2],
         .rt3 = mrt_comp[3],
         .rt4 = mrt_comp[4],
         .rt5 = mrt_comp[5],
         .rt6 = mrt_comp[6],
         .rt7 = mrt_comp[7]));

   OUT_REG(ring, A6XX_SP_FS_RENDER_COMPONENTS(
         .rt0 = mrt_comp[0],
         .rt1 = mrt_comp[1],
         .rt2 = mrt_comp[2],
         .rt3 = mrt_comp[3],
         .rt4 = mrt_comp[4],
         .rt5 = mrt_comp[5],
         .rt6 = mrt_comp[6],
         .rt7 = mrt_comp[7]));

   OUT_REG(ring, A6XX_GRAS_MAX_LAYER_INDEX(max_layer_index));
}

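/*
 * Emit depth/stencil buffer state: depth format, pitch and base address
 * (plus GMEM base when tiling), the depth flag (UBWC) buffer, the LRZ
 * buffer if the resource has one, and the separate-stencil buffer for
 * Z32+S8 style layouts.
 */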
static void
emit_zs(struct fd_ringbuffer *ring, struct pipe_surface *zsbuf,
      const struct fd_gmem_stateobj *gmem)
{
   if (zsbuf) {
      struct fd_resource *rsc = fd_resource(zsbuf->texture);
      enum a6xx_depth_format fmt = fd6_pipe2depth(zsbuf->format);
      uint32_t stride = fd_resource_pitch(rsc, 0);
      uint32_t size = fd_resource_slice(rsc, 0)->size0;
      uint32_t base = gmem ? gmem->zsbuf_base[0] : 0;
      uint32_t offset = fd_resource_offset(rsc, zsbuf->u.tex.level,
            zsbuf->u.tex.first_layer);

      OUT_REG(ring,
            A6XX_RB_DEPTH_BUFFER_INFO(.depth_format = fmt),
            A6XX_RB_DEPTH_BUFFER_PITCH(.a6xx_rb_depth_buffer_pitch = stride),
            A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(.a6xx_rb_depth_buffer_array_pitch = size),
            A6XX_RB_DEPTH_BUFFER_BASE(.bo = rsc->bo, .bo_offset = offset),
            A6XX_RB_DEPTH_BUFFER_BASE_GMEM(.dword = base));

      OUT_REG(ring, A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = fmt));

      OUT_PKT4(ring, REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_LO, 3);
      fd6_emit_flag_reference(ring, rsc,
            zsbuf->u.tex.level, zsbuf->u.tex.first_layer);

      if (rsc->lrz) {
         OUT_REG(ring,
               A6XX_GRAS_LRZ_BUFFER_BASE(.bo = rsc->lrz),
               A6XX_GRAS_LRZ_BUFFER_PITCH(.pitch = rsc->lrz_pitch),
               // XXX a6xx seems to use a different buffer here.. not sure what for..
               A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO(0),
               A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI(0));
      } else {
         OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
         OUT_RING(ring, 0x00000000);
         OUT_RING(ring, 0x00000000);
         OUT_RING(ring, 0x00000000);     /* GRAS_LRZ_BUFFER_PITCH */
         OUT_RING(ring, 0x00000000);     /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO */
         OUT_RING(ring, 0x00000000);
      }

      /* NOTE: blob emits GRAS_LRZ_CNTL plus GRAS_LRZ_BUFFER_BASE
       * plus this CP_EVENT_WRITE at the end in its own IB..
       */
      OUT_PKT7(ring, CP_EVENT_WRITE, 1);
      OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(UNK_25));

      if (rsc->stencil) {
         stride = fd_resource_pitch(rsc->stencil, 0);
         size = fd_resource_slice(rsc->stencil, 0)->size0;
         uint32_t base = gmem ? gmem->zsbuf_base[1] : 0;

         OUT_REG(ring,
               A6XX_RB_STENCIL_INFO(.separate_stencil = true),
               A6XX_RB_STENCIL_BUFFER_PITCH(.a6xx_rb_stencil_buffer_pitch = stride),
               A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH(.a6xx_rb_stencil_buffer_array_pitch = size),
               A6XX_RB_STENCIL_BUFFER_BASE(.bo = rsc->stencil->bo),
               A6XX_RB_STENCIL_BUFFER_BASE_GMEM(.dword = base));
      } else {
         OUT_REG(ring, A6XX_RB_STENCIL_INFO(0));
      }
   } else {
      OUT_PKT4(ring, REG_A6XX_RB_DEPTH_BUFFER_INFO, 6);
      OUT_RING(ring, A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(DEPTH6_NONE));
      OUT_RING(ring, 0x00000000);     /* RB_DEPTH_BUFFER_PITCH */
      OUT_RING(ring, 0x00000000);     /* RB_DEPTH_BUFFER_ARRAY_PITCH */
      OUT_RING(ring, 0x00000000);     /* RB_DEPTH_BUFFER_BASE_LO */
      OUT_RING(ring, 0x00000000);     /* RB_DEPTH_BUFFER_BASE_HI */
      OUT_RING(ring, 0x00000000);     /* RB_DEPTH_BUFFER_BASE_GMEM */

      OUT_REG(ring, A6XX_GRAS_SU_DEPTH_BUFFER_INFO(.depth_format = DEPTH6_NONE));

      OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO, 5);
      OUT_RING(ring, 0x00000000);     /* RB_DEPTH_FLAG_BUFFER_BASE_LO */
      OUT_RING(ring, 0x00000000);     /* RB_DEPTH_FLAG_BUFFER_BASE_HI */
      OUT_RING(ring, 0x00000000);     /* GRAS_LRZ_BUFFER_PITCH */
      OUT_RING(ring, 0x00000000);     /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO */
      OUT_RING(ring, 0x00000000);     /* GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI */

      OUT_REG(ring, A6XX_RB_STENCIL_INFO(0));
   }
}

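/*
 * Decide whether to use the hw binning pass for this batch: binning only
 * pays off when there are at least two bins and at least one draw, and is
 * skipped when a single VSC pipe would have to cover more than 32 bins.
 */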
static bool
use_hw_binning(struct fd_batch *batch)
{
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;

   if ((gmem->maxpw * gmem->maxph) > 32)
      return false;

   return fd_binning_enabled && ((gmem->nbins_x * gmem->nbins_y) >= 2) &&
         (batch->num_draws > 0);
}

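/*
 * Patch up the TEX_CONST_2 pitch of framebuffer-read textures now that the
 * GMEM bin width (and therefore the pitch of the color buffer in GMEM) is
 * known.
 */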
static void
patch_fb_read(struct fd_batch *batch)
{
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;

   for (unsigned i = 0; i < fd_patch_num_elements(&batch->fb_read_patches); i++) {
      struct fd_cs_patch *patch = fd_patch_element(&batch->fb_read_patches, i);
      *patch->cs = patch->val | A6XX_TEX_CONST_2_PITCH(gmem->bin_w * gmem->cbuf_cpp[0]);
   }
   util_dynarray_clear(&batch->fb_read_patches);
}

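/*
 * Program RB_RENDER_CNTL (via CP_REG_WRITE so the CP can track it), with the
 * UBWC flag enables for depth and each MRT, and the BINNING bit when emitted
 * for the binning pass.
 */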
static void
update_render_cntl(struct fd_batch *batch, struct pipe_framebuffer_state *pfb, bool binning)
{
   struct fd_ringbuffer *ring = batch->gmem;
   uint32_t cntl = 0;
   bool depth_ubwc_enable = false;
   uint32_t mrts_ubwc_enable = 0;
   int i;

   if (pfb->zsbuf) {
      struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);
      depth_ubwc_enable = fd_resource_ubwc_enabled(rsc, pfb->zsbuf->u.tex.level);
   }

   for (i = 0; i < pfb->nr_cbufs; i++) {
      if (!pfb->cbufs[i])
         continue;

      struct pipe_surface *psurf = pfb->cbufs[i];
      struct fd_resource *rsc = fd_resource(psurf->texture);
      if (!rsc->bo)
         continue;

      if (fd_resource_ubwc_enabled(rsc, psurf->u.tex.level))
         mrts_ubwc_enable |= 1 << i;
   }

   cntl |= A6XX_RB_RENDER_CNTL_UNK4;
   if (binning)
      cntl |= A6XX_RB_RENDER_CNTL_BINNING;

   OUT_PKT7(ring, CP_REG_WRITE, 3);
   OUT_RING(ring, CP_REG_WRITE_0_TRACKER(TRACK_RENDER_CNTL));
   OUT_RING(ring, REG_A6XX_RB_RENDER_CNTL);
   OUT_RING(ring, cntl |
         COND(depth_ubwc_enable, A6XX_RB_RENDER_CNTL_FLAG_DEPTH) |
         A6XX_RB_RENDER_CNTL_FLAG_MRTS(mrts_ubwc_enable));
}

/* extra size to store VSC_DRAW_STRM_SIZE: */
#define VSC_DRAW_STRM_SIZE(pitch)  ((pitch) * 32 + 0x100)
#define VSC_PRIM_STRM_SIZE(pitch)  ((pitch) * 32)

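/*
 * Size (growing if the batch's estimated stream bits no longer fit) the VSC
 * draw and primitive stream buffers, allocate them if missing, and program
 * the bin size/count plus the per-pipe VSC configuration registers.
 */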
static void
update_vsc_pipe(struct fd_batch *batch)
{
   struct fd_context *ctx = batch->ctx;
   struct fd6_context *fd6_ctx = fd6_context(ctx);
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct fd_ringbuffer *ring = batch->gmem;
   int i;

   if (batch->draw_strm_bits/8 > fd6_ctx->vsc_draw_strm_pitch) {
      if (fd6_ctx->vsc_draw_strm)
         fd_bo_del(fd6_ctx->vsc_draw_strm);
      fd6_ctx->vsc_draw_strm = NULL;
      /* Note: probably only need to align to 0x40, but aligning stronger
       * reduces the odds that we will have to realloc again on the next
       * frame:
       */
      fd6_ctx->vsc_draw_strm_pitch = align(batch->draw_strm_bits/8, 0x4000);
      debug_printf("pre-resize VSC_DRAW_STRM_PITCH to: 0x%x\n",
            fd6_ctx->vsc_draw_strm_pitch);
   }

   if (batch->prim_strm_bits/8 > fd6_ctx->vsc_prim_strm_pitch) {
      if (fd6_ctx->vsc_prim_strm)
         fd_bo_del(fd6_ctx->vsc_prim_strm);
      fd6_ctx->vsc_prim_strm = NULL;
      fd6_ctx->vsc_prim_strm_pitch = align(batch->prim_strm_bits/8, 0x4000);
      debug_printf("pre-resize VSC_PRIM_STRM_PITCH to: 0x%x\n",
            fd6_ctx->vsc_prim_strm_pitch);
   }

   if (!fd6_ctx->vsc_draw_strm) {
      fd6_ctx->vsc_draw_strm = fd_bo_new(ctx->screen->dev,
            VSC_DRAW_STRM_SIZE(fd6_ctx->vsc_draw_strm_pitch),
            DRM_FREEDRENO_GEM_TYPE_KMEM, "vsc_draw_strm");
   }

   if (!fd6_ctx->vsc_prim_strm) {
      fd6_ctx->vsc_prim_strm = fd_bo_new(ctx->screen->dev,
            VSC_PRIM_STRM_SIZE(fd6_ctx->vsc_prim_strm_pitch),
            DRM_FREEDRENO_GEM_TYPE_KMEM, "vsc_prim_strm");
   }

   OUT_REG(ring,
         A6XX_VSC_BIN_SIZE(.width = gmem->bin_w, .height = gmem->bin_h),
         A6XX_VSC_DRAW_STRM_SIZE_ADDRESS(
               .bo = fd6_ctx->vsc_draw_strm,
               .bo_offset = 32 * fd6_ctx->vsc_draw_strm_pitch));

   OUT_REG(ring, A6XX_VSC_BIN_COUNT(.nx = gmem->nbins_x,
         .ny = gmem->nbins_y));

   OUT_PKT4(ring, REG_A6XX_VSC_PIPE_CONFIG_REG(0), 32);
   for (i = 0; i < 32; i++) {
      const struct fd_vsc_pipe *pipe = &gmem->vsc_pipe[i];
      OUT_RING(ring, A6XX_VSC_PIPE_CONFIG_REG_X(pipe->x) |
            A6XX_VSC_PIPE_CONFIG_REG_Y(pipe->y) |
            A6XX_VSC_PIPE_CONFIG_REG_W(pipe->w) |
            A6XX_VSC_PIPE_CONFIG_REG_H(pipe->h));
   }

   OUT_REG(ring,
         A6XX_VSC_PRIM_STRM_ADDRESS(.bo = fd6_ctx->vsc_prim_strm),
         A6XX_VSC_PRIM_STRM_PITCH(.dword = fd6_ctx->vsc_prim_strm_pitch),
         A6XX_VSC_PRIM_STRM_LIMIT(.dword = fd6_ctx->vsc_prim_strm_pitch - 64));

   OUT_REG(ring,
         A6XX_VSC_DRAW_STRM_ADDRESS(.bo = fd6_ctx->vsc_draw_strm),
         A6XX_VSC_DRAW_STRM_PITCH(.dword = fd6_ctx->vsc_draw_strm_pitch),
         A6XX_VSC_DRAW_STRM_LIMIT(.dword = fd6_ctx->vsc_draw_strm_pitch - 64));
}

/*
 * If overflow is detected, either 0x1 (VSC_DRAW_STRM overflow) or 0x3
 * (VSC_PRIM_STRM overflow) plus the size of the overflowed buffer is
 * written to control->vsc_overflow.  This allows the CPU to detect which
 * buffer overflowed (and, since the current size is encoded as well, it
 * prevents already-submitted but not yet executed batches from fooling
 * the CPU into increasing the size again unnecessarily).
 */
static void
emit_vsc_overflow_test(struct fd_batch *batch)
{
   struct fd_ringbuffer *ring = batch->gmem;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct fd6_context *fd6_ctx = fd6_context(batch->ctx);

   debug_assert((fd6_ctx->vsc_draw_strm_pitch & 0x3) == 0);
   debug_assert((fd6_ctx->vsc_prim_strm_pitch & 0x3) == 0);

   /* Check for overflow, write vsc_overflow if detected: */
   for (int i = 0; i < gmem->num_vsc_pipes; i++) {
      OUT_PKT7(ring, CP_COND_WRITE5, 8);
      OUT_RING(ring, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
            CP_COND_WRITE5_0_WRITE_MEMORY);
      OUT_RING(ring, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_DRAW_STRM_SIZE_REG(i)));
      OUT_RING(ring, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
      OUT_RING(ring, CP_COND_WRITE5_3_REF(fd6_ctx->vsc_draw_strm_pitch - 64));
      OUT_RING(ring, CP_COND_WRITE5_4_MASK(~0));
      OUT_RELOC(ring, control_ptr(fd6_ctx, vsc_overflow));  /* WRITE_ADDR_LO/HI */
      OUT_RING(ring, CP_COND_WRITE5_7_WRITE_DATA(1 + fd6_ctx->vsc_draw_strm_pitch));

      OUT_PKT7(ring, CP_COND_WRITE5, 8);
      OUT_RING(ring, CP_COND_WRITE5_0_FUNCTION(WRITE_GE) |
            CP_COND_WRITE5_0_WRITE_MEMORY);
      OUT_RING(ring, CP_COND_WRITE5_1_POLL_ADDR_LO(REG_A6XX_VSC_PRIM_STRM_SIZE_REG(i)));
      OUT_RING(ring, CP_COND_WRITE5_2_POLL_ADDR_HI(0));
      OUT_RING(ring, CP_COND_WRITE5_3_REF(fd6_ctx->vsc_prim_strm_pitch - 64));
      OUT_RING(ring, CP_COND_WRITE5_4_MASK(~0));
      OUT_RELOC(ring, control_ptr(fd6_ctx, vsc_overflow));  /* WRITE_ADDR_LO/HI */
      OUT_RING(ring, CP_COND_WRITE5_7_WRITE_DATA(3 + fd6_ctx->vsc_prim_strm_pitch));
   }

   OUT_PKT7(ring, CP_WAIT_MEM_WRITES, 0);
}

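/*
 * CPU-side companion to emit_vsc_overflow_test(): after the batch has
 * executed, read back control->vsc_overflow and, if an overflow was
 * recorded for the current buffer size, double the pitch of whichever
 * stream overflowed so it gets reallocated larger for the next frame.
 */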
static void
check_vsc_overflow(struct fd_context *ctx)
{
   struct fd6_context *fd6_ctx = fd6_context(ctx);
   struct fd6_control *control = fd_bo_map(fd6_ctx->control_mem);
   uint32_t vsc_overflow = control->vsc_overflow;

   if (!vsc_overflow)
      return;

   /* clear overflow flag: */
   control->vsc_overflow = 0;

   unsigned buffer = vsc_overflow & 0x3;
   unsigned size = vsc_overflow & ~0x3;

   if (buffer == 0x1) {
      /* VSC_DRAW_STRM overflow: */

      if (size < fd6_ctx->vsc_draw_strm_pitch) {
         /* we've already increased the size, this overflow is
          * from a batch submitted before resize, but executed
          * after
          */
         return;
      }

      fd_bo_del(fd6_ctx->vsc_draw_strm);
      fd6_ctx->vsc_draw_strm = NULL;
      fd6_ctx->vsc_draw_strm_pitch *= 2;

      debug_printf("resized VSC_DRAW_STRM_PITCH to: 0x%x\n",
            fd6_ctx->vsc_draw_strm_pitch);

   } else if (buffer == 0x3) {
      /* VSC_PRIM_STRM overflow: */

      if (size < fd6_ctx->vsc_prim_strm_pitch) {
         /* we've already increased the size */
         return;
      }

      fd_bo_del(fd6_ctx->vsc_prim_strm);
      fd6_ctx->vsc_prim_strm = NULL;
      fd6_ctx->vsc_prim_strm_pitch *= 2;

      debug_printf("resized VSC_PRIM_STRM_PITCH to: 0x%x\n",
            fd6_ctx->vsc_prim_strm_pitch);

   } else {
      /* NOTE: it's possible, for example, for overflow to corrupt the
       * control page.  I mostly just see this hit if I set initial VSC
       * buffer size extremely small.  Things still seem to recover,
       * but maybe we should pre-emptively realloc vsc_draw_strm/vsc_prim_strm
       * and hope for different memory placement?
       */
      DBG("invalid vsc_overflow value: 0x%08x", vsc_overflow);
   }
}

/*
 * Emit a conditional CP_INDIRECT_BUFFER based on VSC_STATE[p], i.e. the IB
 * is skipped for tiles that have no visible geometry.
 */
static void
emit_conditional_ib(struct fd_batch *batch, const struct fd_tile *tile,
      struct fd_ringbuffer *target)
{
   struct fd_ringbuffer *ring = batch->gmem;

   if (target->cur == target->start)
      return;

   emit_marker6(ring, 6);

   unsigned count = fd_ringbuffer_cmd_count(target);

   BEGIN_RING(ring, 5 + 4 * count);  /* ensure conditional doesn't get split */

   OUT_PKT7(ring, CP_REG_TEST, 1);
   OUT_RING(ring, A6XX_CP_REG_TEST_0_REG(REG_A6XX_VSC_STATE_REG(tile->p)) |
         A6XX_CP_REG_TEST_0_BIT(tile->n) |
         A6XX_CP_REG_TEST_0_WAIT_FOR_ME);

   OUT_PKT7(ring, CP_COND_REG_EXEC, 2);
   OUT_RING(ring, CP_COND_REG_EXEC_0_MODE(PRED_TEST));
   OUT_RING(ring, CP_COND_REG_EXEC_1_DWORDS(4 * count));

   for (unsigned i = 0; i < count; i++) {
      uint32_t dwords;
      OUT_PKT7(ring, CP_INDIRECT_BUFFER, 3);
      dwords = fd_ringbuffer_emit_reloc_ring_full(ring, target, i) / 4;
      assert(dwords > 0);
      OUT_RING(ring, dwords);
   }

   emit_marker6(ring, 6);
}

static void
set_scissor(struct fd_ringbuffer *ring, uint32_t x1, uint32_t y1, uint32_t x2, uint32_t y2)
{
   OUT_REG(ring,
         A6XX_GRAS_SC_WINDOW_SCISSOR_TL(.x = x1, .y = y1),
         A6XX_GRAS_SC_WINDOW_SCISSOR_BR(.x = x2, .y = y2));

   OUT_REG(ring,
         A6XX_GRAS_2D_RESOLVE_CNTL_1(.x = x1, .y = y1),
         A6XX_GRAS_2D_RESOLVE_CNTL_2(.x = x2, .y = y2));
}

static void
set_bin_size(struct fd_ringbuffer *ring, uint32_t w, uint32_t h, uint32_t flag)
{
   OUT_REG(ring, A6XX_GRAS_BIN_CONTROL(.binw = w, .binh = h, .dword = flag));
   OUT_REG(ring, A6XX_RB_BIN_CONTROL(.binw = w, .binh = h, .dword = flag));
   /* no flag for RB_BIN_CONTROL2... */
   OUT_REG(ring, A6XX_RB_BIN_CONTROL2(.binw = w, .binh = h));
}

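/*
 * Run the visibility ("binning") pass: replay the draw IB with binning mode
 * set so the hardware fills the VSC draw/primitive streams, then test the
 * resulting stream sizes for overflow.  The per-tile draw passes later use
 * this visibility data to skip bins with no geometry.
 */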
static void
emit_binning_pass(struct fd_batch *batch)
{
   struct fd_ringbuffer *ring = batch->gmem;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct fd_screen *screen = batch->ctx->screen;

   debug_assert(!batch->tessellation);

   set_scissor(ring, 0, 0, gmem->width - 1, gmem->height - 1);

   emit_marker6(ring, 7);
   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BINNING));
   emit_marker6(ring, 7);

   OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
   OUT_RING(ring, 0x1);

   OUT_PKT7(ring, CP_SET_MODE, 1);
   OUT_RING(ring, 0x1);

   OUT_WFI5(ring);

   OUT_REG(ring, A6XX_VFD_MODE_CNTL(.binning_pass = true));

   update_vsc_pipe(batch);

   OUT_PKT4(ring, REG_A6XX_PC_UNKNOWN_9805, 1);
   OUT_RING(ring, screen->info.a6xx.magic.PC_UNKNOWN_9805);

   OUT_PKT4(ring, REG_A6XX_SP_UNKNOWN_A0F8, 1);
   OUT_RING(ring, screen->info.a6xx.magic.SP_UNKNOWN_A0F8);

   OUT_PKT7(ring, CP_EVENT_WRITE, 1);
   OUT_RING(ring, UNK_2C);

   OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET, 1);
   OUT_RING(ring, A6XX_RB_WINDOW_OFFSET_X(0) |
         A6XX_RB_WINDOW_OFFSET_Y(0));

   OUT_PKT4(ring, REG_A6XX_SP_TP_WINDOW_OFFSET, 1);
   OUT_RING(ring, A6XX_SP_TP_WINDOW_OFFSET_X(0) |
         A6XX_SP_TP_WINDOW_OFFSET_Y(0));

   /* emit IB to binning drawcmds: */
   fd_log(batch, "GMEM: START BINNING IB");
   fd6_emit_ib(ring, batch->draw);
   fd_log(batch, "GMEM: END BINNING IB");

   fd_reset_wfi(batch);

   OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
   OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
         CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
         CP_SET_DRAW_STATE__0_GROUP_ID(0));
   OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   OUT_RING(ring, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   OUT_PKT7(ring, CP_EVENT_WRITE, 1);
   OUT_RING(ring, UNK_2D);

   fd6_cache_inv(batch, ring);
   fd6_cache_flush(batch, ring);
   fd_wfi(batch, ring);

   OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);

   fd_log(batch, "START VSC OVERFLOW TEST");
   emit_vsc_overflow_test(batch);
   fd_log(batch, "END VSC OVERFLOW TEST");

   OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
   OUT_RING(ring, 0x0);

   OUT_PKT7(ring, CP_SET_MODE, 1);
   OUT_RING(ring, 0x0);

   OUT_WFI5(ring);

   OUT_REG(ring,
         A6XX_RB_CCU_CNTL(.offset = screen->info.a6xx.ccu_offset_gmem,
               .gmem = true,
               .unk2 = screen->info.a6xx.ccu_cntl_gmem_unk2));
}

static void
emit_msaa(struct fd_ringbuffer *ring, unsigned nr)
{
   enum a3xx_msaa_samples samples = fd_msaa_samples(nr);

   OUT_PKT4(ring, REG_A6XX_SP_TP_RAS_MSAA_CNTL, 2);
   OUT_RING(ring, A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES(samples));
   OUT_RING(ring, A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES(samples) |
         COND(samples == MSAA_ONE, A6XX_SP_TP_DEST_MSAA_CNTL_MSAA_DISABLE));

   OUT_PKT4(ring, REG_A6XX_GRAS_RAS_MSAA_CNTL, 2);
   OUT_RING(ring, A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES(samples));
   OUT_RING(ring, A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES(samples) |
         COND(samples == MSAA_ONE, A6XX_GRAS_DEST_MSAA_CNTL_MSAA_DISABLE));

   OUT_PKT4(ring, REG_A6XX_RB_RAS_MSAA_CNTL, 2);
   OUT_RING(ring, A6XX_RB_RAS_MSAA_CNTL_SAMPLES(samples));
   OUT_RING(ring, A6XX_RB_DEST_MSAA_CNTL_SAMPLES(samples) |
         COND(samples == MSAA_ONE, A6XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE));

   OUT_PKT4(ring, REG_A6XX_RB_MSAA_CNTL, 1);
   OUT_RING(ring, A6XX_RB_MSAA_CNTL_SAMPLES(samples));
}

static void prepare_tile_setup_ib(struct fd_batch *batch);
static void prepare_tile_fini_ib(struct fd_batch *batch);

/* before first tile */
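/*
 * Once-per-batch GMEM setup: restore context state, flush LRZ, run the
 * prologue IB if any, build the tile setup/fini IBs, program the CCU for
 * GMEM rendering, and (when enabled) run the binning pass.
 */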
static void
fd6_emit_tile_init(struct fd_batch *batch)
{
   struct fd_ringbuffer *ring = batch->gmem;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct fd_screen *screen = batch->ctx->screen;

   fd6_emit_restore(batch, ring);

   fd6_emit_lrz_flush(ring);

   if (batch->prologue) {
      fd_log(batch, "START PROLOGUE");
      fd6_emit_ib(ring, batch->prologue);
      fd_log(batch, "END PROLOGUE");
   }

   fd6_cache_inv(batch, ring);

   prepare_tile_setup_ib(batch);
   prepare_tile_fini_ib(batch);

   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   OUT_RING(ring, 0x0);

   /* blob controls "local" in IB2, but I think that is not required */
   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_LOCAL, 1);
   OUT_RING(ring, 0x1);

   fd_wfi(batch, ring);
   OUT_REG(ring,
         A6XX_RB_CCU_CNTL(.offset = screen->info.a6xx.ccu_offset_gmem,
               .gmem = true,
               .unk2 = screen->info.a6xx.ccu_cntl_gmem_unk2));

   emit_zs(ring, pfb->zsbuf, batch->gmem_state);
   emit_mrt(ring, pfb, batch->gmem_state);
   emit_msaa(ring, pfb->samples);
   patch_fb_read(batch);

   if (use_hw_binning(batch)) {
      /* enable stream-out during binning pass: */
      OUT_REG(ring, A6XX_VPC_SO_DISABLE(false));

      set_bin_size(ring, gmem->bin_w, gmem->bin_h,
            A6XX_RB_BIN_CONTROL_BINNING_PASS | 0x6000000);
      update_render_cntl(batch, pfb, true);
      emit_binning_pass(batch);

      /* and disable stream-out for draw pass: */
      OUT_REG(ring, A6XX_VPC_SO_DISABLE(true));

      /*
       * NOTE: even if we detect VSC overflow and disable use of
       * the visibility stream in the draw pass, it is still safe to
       * execute the rest of these cmds:
       */

      // NOTE a618 not setting .USE_VIZ .. from a quick check on a630, it
      // does not appear that this bit changes much (ie. it isn't actually
      // .USE_VIZ like previous gens)
      set_bin_size(ring, gmem->bin_w, gmem->bin_h,
            A6XX_RB_BIN_CONTROL_USE_VIZ | 0x6000000);

      OUT_PKT4(ring, REG_A6XX_VFD_MODE_CNTL, 1);
      OUT_RING(ring, 0x0);

      OUT_PKT4(ring, REG_A6XX_PC_UNKNOWN_9805, 1);
      OUT_RING(ring, screen->info.a6xx.magic.PC_UNKNOWN_9805);

      OUT_PKT4(ring, REG_A6XX_SP_UNKNOWN_A0F8, 1);
      OUT_RING(ring, screen->info.a6xx.magic.SP_UNKNOWN_A0F8);

      OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
      OUT_RING(ring, 0x1);
   } else {
      /* no binning pass, so enable stream-out for draw pass: */
      OUT_REG(ring, A6XX_VPC_SO_DISABLE(false));

      set_bin_size(ring, gmem->bin_w, gmem->bin_h, 0x6000000);
   }

   update_render_cntl(batch, pfb, false);
}

static void
set_window_offset(struct fd_ringbuffer *ring, uint32_t x1, uint32_t y1)
{
   OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET, 1);
   OUT_RING(ring, A6XX_RB_WINDOW_OFFSET_X(x1) |
         A6XX_RB_WINDOW_OFFSET_Y(y1));

   OUT_PKT4(ring, REG_A6XX_RB_WINDOW_OFFSET2, 1);
   OUT_RING(ring, A6XX_RB_WINDOW_OFFSET2_X(x1) |
         A6XX_RB_WINDOW_OFFSET2_Y(y1));

   OUT_PKT4(ring, REG_A6XX_SP_WINDOW_OFFSET, 1);
   OUT_RING(ring, A6XX_SP_WINDOW_OFFSET_X(x1) |
         A6XX_SP_WINDOW_OFFSET_Y(y1));

   OUT_PKT4(ring, REG_A6XX_SP_TP_WINDOW_OFFSET, 1);
   OUT_RING(ring, A6XX_SP_TP_WINDOW_OFFSET_X(x1) |
         A6XX_SP_TP_WINDOW_OFFSET_Y(y1));
}

/* before mem2gmem */
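/*
 * Per-tile setup: set the GMEM marker, the tile's scissor and window offset,
 * and (with hw binning) point CP_SET_BIN_DATA5 at this tile's slice of the
 * visibility streams so the CP can use the binning results for this bin.
 */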
static void
fd6_emit_tile_prep(struct fd_batch *batch, const struct fd_tile *tile)
{
   struct fd_context *ctx = batch->ctx;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct fd6_context *fd6_ctx = fd6_context(ctx);
   struct fd_ringbuffer *ring = batch->gmem;

   emit_marker6(ring, 7);
   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_GMEM));
   emit_marker6(ring, 7);

   uint32_t x1 = tile->xoff;
   uint32_t y1 = tile->yoff;
   uint32_t x2 = tile->xoff + tile->bin_w - 1;
   uint32_t y2 = tile->yoff + tile->bin_h - 1;

   set_scissor(ring, x1, y1, x2, y2);

   if (use_hw_binning(batch)) {
      const struct fd_vsc_pipe *pipe = &gmem->vsc_pipe[tile->p];

      OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);

      OUT_PKT7(ring, CP_SET_MODE, 1);
      OUT_RING(ring, 0x0);

      OUT_PKT7(ring, CP_SET_BIN_DATA5, 7);
      OUT_RING(ring, CP_SET_BIN_DATA5_0_VSC_SIZE(pipe->w * pipe->h) |
            CP_SET_BIN_DATA5_0_VSC_N(tile->n));
      OUT_RELOC(ring, fd6_ctx->vsc_draw_strm,       /* per-pipe draw-stream address */
            (tile->p * fd6_ctx->vsc_draw_strm_pitch), 0, 0);
      OUT_RELOC(ring, fd6_ctx->vsc_draw_strm,       /* VSC_DRAW_STRM_ADDRESS + (p * 4) */
            (tile->p * 4) + (32 * fd6_ctx->vsc_draw_strm_pitch), 0, 0);
      OUT_RELOC(ring, fd6_ctx->vsc_prim_strm,
            (tile->p * fd6_ctx->vsc_prim_strm_pitch), 0, 0);

      OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
      OUT_RING(ring, 0x0);

      set_window_offset(ring, x1, y1);

      const struct fd_gmem_stateobj *gmem = batch->gmem_state;
      set_bin_size(ring, gmem->bin_w, gmem->bin_h, 0x6000000);

      OUT_PKT7(ring, CP_SET_MODE, 1);
      OUT_RING(ring, 0x0);
   } else {
      set_window_offset(ring, x1, y1);

      OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
      OUT_RING(ring, 0x1);

      OUT_PKT7(ring, CP_SET_MODE, 1);
      OUT_RING(ring, 0x0);
   }
}

static void
set_blit_scissor(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
   struct pipe_scissor_state blit_scissor = batch->max_scissor;

   blit_scissor.minx = ROUND_DOWN_TO(blit_scissor.minx, 16);
   blit_scissor.miny = ROUND_DOWN_TO(blit_scissor.miny, 4);
   blit_scissor.maxx = ALIGN(blit_scissor.maxx, 16);
   blit_scissor.maxy = ALIGN(blit_scissor.maxy, 4);

   OUT_PKT4(ring, REG_A6XX_RB_BLIT_SCISSOR_TL, 2);
   OUT_RING(ring,
         A6XX_RB_BLIT_SCISSOR_TL_X(blit_scissor.minx) |
         A6XX_RB_BLIT_SCISSOR_TL_Y(blit_scissor.miny));
   OUT_RING(ring,
         A6XX_RB_BLIT_SCISSOR_BR_X(blit_scissor.maxx - 1) |
         A6XX_RB_BLIT_SCISSOR_BR_Y(blit_scissor.maxy - 1));
}

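/*
 * Emit the blit state for one surface (destination format/layout/pitch and
 * base address in memory, its base in GMEM, and the UBWC flag buffer if
 * compression is enabled), then kick it off via fd6_emit_blit().  Shared by
 * the restore (mem2gmem) and resolve (gmem2mem) paths; the callers program
 * RB_BLIT_INFO first to pick the direction.
 */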
static void
emit_blit(struct fd_batch *batch,
      struct fd_ringbuffer *ring,
      uint32_t base,
      struct pipe_surface *psurf,
      bool stencil)
{
   struct fd_resource *rsc = fd_resource(psurf->texture);
   enum pipe_format pfmt = psurf->format;
   uint32_t offset;
   bool ubwc_enabled;

   debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);

   /* separate stencil case: */
   if (stencil) {
      rsc = rsc->stencil;
      pfmt = rsc->base.format;
   }

   offset = fd_resource_offset(rsc, psurf->u.tex.level,
         psurf->u.tex.first_layer);
   ubwc_enabled = fd_resource_ubwc_enabled(rsc, psurf->u.tex.level);

   debug_assert(psurf->u.tex.first_layer == psurf->u.tex.last_layer);

   enum a6xx_format format = fd6_pipe2color(pfmt);
   uint32_t stride = fd_resource_pitch(rsc, psurf->u.tex.level);
   uint32_t size = fd_resource_slice(rsc, psurf->u.tex.level)->size0;
   enum a3xx_color_swap swap = fd6_resource_swap(rsc, pfmt);
   enum a3xx_msaa_samples samples =
         fd_msaa_samples(rsc->base.nr_samples);
   uint32_t tile_mode = fd_resource_tile_mode(&rsc->base, psurf->u.tex.level);

   OUT_REG(ring,
         A6XX_RB_BLIT_DST_INFO(.tile_mode = tile_mode, .samples = samples,
               .color_format = format, .color_swap = swap, .flags = ubwc_enabled),
         A6XX_RB_BLIT_DST(.bo = rsc->bo, .bo_offset = offset),
         A6XX_RB_BLIT_DST_PITCH(.a6xx_rb_blit_dst_pitch = stride),
         A6XX_RB_BLIT_DST_ARRAY_PITCH(.a6xx_rb_blit_dst_array_pitch = size));

   OUT_REG(ring, A6XX_RB_BLIT_BASE_GMEM(.dword = base));

   if (ubwc_enabled) {
      OUT_PKT4(ring, REG_A6XX_RB_BLIT_FLAG_DST_LO, 3);
      fd6_emit_flag_reference(ring, rsc,
            psurf->u.tex.level, psurf->u.tex.first_layer);
   }

   fd6_emit_blit(batch, ring);
}

static void
emit_restore_blit(struct fd_batch *batch,
      struct fd_ringbuffer *ring,
      uint32_t base,
      struct pipe_surface *psurf,
      unsigned buffer)
{
   bool stencil = (buffer == FD_BUFFER_STENCIL);

   OUT_REG(ring, A6XX_RB_BLIT_INFO(
         .gmem = true, .unk0 = true,
         .depth = (buffer == FD_BUFFER_DEPTH),
         .integer = util_format_is_pure_integer(psurf->format)));

   emit_blit(batch, ring, base, psurf, stencil);
}

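/*
 * Fast-clear path: for buffers flagged in batch->fast_cleared, write the
 * packed clear value directly into GMEM with BLIT events (color first, then
 * depth and/or separate stencil) instead of restoring from memory.
 */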
static void
emit_clears(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   enum a3xx_msaa_samples samples = fd_msaa_samples(pfb->samples);

   uint32_t buffers = batch->fast_cleared;

   if (buffers & PIPE_CLEAR_COLOR) {

      for (int i = 0; i < pfb->nr_cbufs; i++) {
         union pipe_color_union *color = &batch->clear_color[i];
         union util_color uc = {0};

         if (!pfb->cbufs[i])
            continue;

         if (!(buffers & (PIPE_CLEAR_COLOR0 << i)))
            continue;

         enum pipe_format pfmt = pfb->cbufs[i]->format;

         // XXX I think RB_CLEAR_COLOR_DWn wants to take into account SWAP??
         union pipe_color_union swapped;
         switch (fd6_pipe2swap(pfmt)) {
         case WZYX:
            swapped.ui[0] = color->ui[0];
            swapped.ui[1] = color->ui[1];
            swapped.ui[2] = color->ui[2];
            swapped.ui[3] = color->ui[3];
            break;
         case WXYZ:
            swapped.ui[2] = color->ui[0];
            swapped.ui[1] = color->ui[1];
            swapped.ui[0] = color->ui[2];
            swapped.ui[3] = color->ui[3];
            break;
         case ZYXW:
            swapped.ui[3] = color->ui[0];
            swapped.ui[0] = color->ui[1];
            swapped.ui[1] = color->ui[2];
            swapped.ui[2] = color->ui[3];
            break;
         case XYZW:
            swapped.ui[3] = color->ui[0];
            swapped.ui[2] = color->ui[1];
            swapped.ui[1] = color->ui[2];
            swapped.ui[0] = color->ui[3];
            break;
         }

         util_pack_color_union(pfmt, &uc, &swapped);

         OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
         OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
               A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
               A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(fd6_pipe2color(pfmt)));

         OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
         OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
               A6XX_RB_BLIT_INFO_CLEAR_MASK(0xf));

         OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
         OUT_RING(ring, gmem->cbuf_base[i]);

         OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
         OUT_RING(ring, 0);

         OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 4);
         OUT_RING(ring, uc.ui[0]);
         OUT_RING(ring, uc.ui[1]);
         OUT_RING(ring, uc.ui[2]);
         OUT_RING(ring, uc.ui[3]);

         fd6_emit_blit(batch, ring);
      }
   }

   const bool has_depth = pfb->zsbuf;
   const bool has_separate_stencil =
         has_depth && fd_resource(pfb->zsbuf->texture)->stencil;

   /* First clear depth or combined depth/stencil. */
   if ((has_depth && (buffers & PIPE_CLEAR_DEPTH)) ||
         (!has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL))) {
      enum pipe_format pfmt = pfb->zsbuf->format;
      uint32_t clear_value;
      uint32_t mask = 0;

      if (has_separate_stencil) {
         pfmt = util_format_get_depth_only(pfb->zsbuf->format);
         clear_value = util_pack_z(pfmt, batch->clear_depth);
      } else {
         pfmt = pfb->zsbuf->format;
         clear_value = util_pack_z_stencil(pfmt, batch->clear_depth,
               batch->clear_stencil);
      }

      if (buffers & PIPE_CLEAR_DEPTH)
         mask |= 0x1;

      if (!has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL))
         mask |= 0x2;

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
      OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
            A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
            A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(fd6_pipe2color(pfmt)));

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
      OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
            // XXX UNK0 for separate stencil ??
            A6XX_RB_BLIT_INFO_DEPTH |
            A6XX_RB_BLIT_INFO_CLEAR_MASK(mask));

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
      OUT_RING(ring, gmem->zsbuf_base[0]);

      OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
      OUT_RING(ring, 0);

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 1);
      OUT_RING(ring, clear_value);

      fd6_emit_blit(batch, ring);
   }

   /* Then clear the separate stencil buffer in case of 32 bit depth
    * formats with separate stencil. */
   if (has_separate_stencil && (buffers & PIPE_CLEAR_STENCIL)) {
      OUT_PKT4(ring, REG_A6XX_RB_BLIT_DST_INFO, 1);
      OUT_RING(ring, A6XX_RB_BLIT_DST_INFO_TILE_MODE(TILE6_LINEAR) |
            A6XX_RB_BLIT_DST_INFO_SAMPLES(samples) |
            A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(FMT6_8_UINT));

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
      OUT_RING(ring, A6XX_RB_BLIT_INFO_GMEM |
            //A6XX_RB_BLIT_INFO_UNK0 |
            A6XX_RB_BLIT_INFO_DEPTH |
            A6XX_RB_BLIT_INFO_CLEAR_MASK(0x1));

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_BASE_GMEM, 1);
      OUT_RING(ring, gmem->zsbuf_base[1]);

      OUT_PKT4(ring, REG_A6XX_RB_UNKNOWN_88D0, 1);
      OUT_RING(ring, 0);

      OUT_PKT4(ring, REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0, 1);
      OUT_RING(ring, batch->clear_stencil & 0xff);

      fd6_emit_blit(batch, ring);
   }
}

/*
 * transfer from system memory to gmem
 */
static void
emit_restore_blits(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;

   if (batch->restore & FD_BUFFER_COLOR) {
      unsigned i;
      for (i = 0; i < pfb->nr_cbufs; i++) {
         if (!pfb->cbufs[i])
            continue;
         if (!(batch->restore & (PIPE_CLEAR_COLOR0 << i)))
            continue;
         emit_restore_blit(batch, ring, gmem->cbuf_base[i], pfb->cbufs[i],
               FD_BUFFER_COLOR);
      }
   }

   if (batch->restore & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
      struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);

      if (!rsc->stencil || (batch->restore & FD_BUFFER_DEPTH)) {
         emit_restore_blit(batch, ring, gmem->zsbuf_base[0], pfb->zsbuf,
               FD_BUFFER_DEPTH);
      }
      if (rsc->stencil && (batch->restore & FD_BUFFER_STENCIL)) {
         emit_restore_blit(batch, ring, gmem->zsbuf_base[1], pfb->zsbuf,
               FD_BUFFER_STENCIL);
      }
   }
}

static void
prepare_tile_setup_ib(struct fd_batch *batch)
{
   if (!(batch->restore || batch->fast_cleared))
      return;

   batch->tile_setup = fd_submit_new_ringbuffer(batch->submit, 0x1000,
         FD_RINGBUFFER_STREAMING);

   set_blit_scissor(batch, batch->tile_setup);

   emit_restore_blits(batch, batch->tile_setup);
   emit_clears(batch, batch->tile_setup);
}

/*
 * transfer from system memory to gmem
 */
static void
fd6_emit_tile_mem2gmem(struct fd_batch *batch, const struct fd_tile *tile)
{
}

/* before IB to rendering cmds: */
static void
fd6_emit_tile_renderprep(struct fd_batch *batch, const struct fd_tile *tile)
{
   if (!batch->tile_setup)
      return;

   fd_log(batch, "TILE: START CLEAR/RESTORE");
   if (batch->fast_cleared || !use_hw_binning(batch)) {
      fd6_emit_ib(batch->gmem, batch->tile_setup);
   } else {
      emit_conditional_ib(batch, tile, batch->tile_setup);
   }
   fd_log(batch, "TILE: END CLEAR/RESTORE");
}

static void
emit_resolve_blit(struct fd_batch *batch,
      struct fd_ringbuffer *ring,
      uint32_t base,
      struct pipe_surface *psurf,
      unsigned buffer)
{
   uint32_t info = 0;
   bool stencil = false;

   if (!fd_resource(psurf->texture)->valid)
      return;

   switch (buffer) {
   case FD_BUFFER_COLOR:
      break;
   case FD_BUFFER_STENCIL:
      info |= A6XX_RB_BLIT_INFO_UNK0;
      stencil = true;
      break;
   case FD_BUFFER_DEPTH:
      info |= A6XX_RB_BLIT_INFO_DEPTH;
      break;
   }

   if (util_format_is_pure_integer(psurf->format))
      info |= A6XX_RB_BLIT_INFO_INTEGER;

   OUT_PKT4(ring, REG_A6XX_RB_BLIT_INFO, 1);
   OUT_RING(ring, info);

   emit_blit(batch, ring, base, psurf, stencil);
}

/*
 * transfer from gmem to system memory (ie. normal RAM)
 */

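/*
 * Build the per-tile resolve IB up front (at tile_init time) so it can be
 * replayed for each tile, optionally gated on that tile's visibility.
 */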
static void
prepare_tile_fini_ib(struct fd_batch *batch)
{
   const struct fd_gmem_stateobj *gmem = batch->gmem_state;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   struct fd_ringbuffer *ring;

   batch->tile_fini = fd_submit_new_ringbuffer(batch->submit, 0x1000,
         FD_RINGBUFFER_STREAMING);
   ring = batch->tile_fini;

   set_blit_scissor(batch, ring);

   if (batch->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
      struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);

      if (!rsc->stencil || (batch->resolve & FD_BUFFER_DEPTH)) {
         emit_resolve_blit(batch, ring,
               gmem->zsbuf_base[0], pfb->zsbuf,
               FD_BUFFER_DEPTH);
      }
      if (rsc->stencil && (batch->resolve & FD_BUFFER_STENCIL)) {
         emit_resolve_blit(batch, ring,
               gmem->zsbuf_base[1], pfb->zsbuf,
               FD_BUFFER_STENCIL);
      }
   }

   if (batch->resolve & FD_BUFFER_COLOR) {
      unsigned i;
      for (i = 0; i < pfb->nr_cbufs; i++) {
         if (!pfb->cbufs[i])
            continue;
         if (!(batch->resolve & (PIPE_CLEAR_COLOR0 << i)))
            continue;
         emit_resolve_blit(batch, ring, gmem->cbuf_base[i], pfb->cbufs[i],
               FD_BUFFER_COLOR);
      }
   }
}

static void
fd6_emit_tile(struct fd_batch *batch, const struct fd_tile *tile)
{
   if (!use_hw_binning(batch)) {
      fd6_emit_ib(batch->gmem, batch->draw);
   } else {
      emit_conditional_ib(batch, tile, batch->draw);
   }

   if (batch->epilogue)
      fd6_emit_ib(batch->gmem, batch->epilogue);
}

static void
fd6_emit_tile_gmem2mem(struct fd_batch *batch, const struct fd_tile *tile)
{
   struct fd_ringbuffer *ring = batch->gmem;

   if (use_hw_binning(batch)) {
      OUT_PKT7(ring, CP_SET_MARKER, 1);
      OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_ENDVIS));
   }

   OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
   OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
         CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
         CP_SET_DRAW_STATE__0_GROUP_ID(0));
   OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   OUT_RING(ring, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_LOCAL, 1);
   OUT_RING(ring, 0x0);

   emit_marker6(ring, 7);
   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_RESOLVE));
   emit_marker6(ring, 7);

   fd_log(batch, "TILE: START RESOLVE");
   if (batch->fast_cleared || !use_hw_binning(batch)) {
      fd6_emit_ib(batch->gmem, batch->tile_fini);
   } else {
      emit_conditional_ib(batch, tile, batch->tile_fini);
   }
   fd_log(batch, "TILE: END RESOLVE");
}

static void
fd6_emit_tile_fini(struct fd_batch *batch)
{
   struct fd_ringbuffer *ring = batch->gmem;

   OUT_PKT4(ring, REG_A6XX_GRAS_LRZ_CNTL, 1);
   OUT_RING(ring, A6XX_GRAS_LRZ_CNTL_ENABLE);

   fd6_emit_lrz_flush(ring);

   fd6_event_write(batch, ring, PC_CCU_RESOLVE_TS, true);

   if (use_hw_binning(batch)) {
      check_vsc_overflow(batch->ctx);
   }
}

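/*
 * Sysmem (bypass) path clears: since there is no GMEM to blit from, clear
 * the attached surfaces directly with the 2D blitter via fd6_clear_surface().
 */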
static void
emit_sysmem_clears(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
   struct fd_context *ctx = batch->ctx;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;

   uint32_t buffers = batch->fast_cleared;

   if (buffers & PIPE_CLEAR_COLOR) {
      for (int i = 0; i < pfb->nr_cbufs; i++) {
         union pipe_color_union *color = &batch->clear_color[i];

         if (!pfb->cbufs[i])
            continue;

         if (!(buffers & (PIPE_CLEAR_COLOR0 << i)))
            continue;

         fd6_clear_surface(ctx, ring,
               pfb->cbufs[i], pfb->width, pfb->height, color);
      }
   }
   if (buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) {
      union pipe_color_union value = {};

      const bool has_depth = pfb->zsbuf;
      struct pipe_resource *separate_stencil =
            has_depth && fd_resource(pfb->zsbuf->texture)->stencil ?
            &fd_resource(pfb->zsbuf->texture)->stencil->base : NULL;

      if ((has_depth && (buffers & PIPE_CLEAR_DEPTH)) ||
            (!separate_stencil && (buffers & PIPE_CLEAR_STENCIL))) {
         value.f[0] = batch->clear_depth;
         value.ui[1] = batch->clear_stencil;
         fd6_clear_surface(ctx, ring,
               pfb->zsbuf, pfb->width, pfb->height, &value);
      }

      if (separate_stencil && (buffers & PIPE_CLEAR_STENCIL)) {
         value.ui[0] = batch->clear_stencil;

         struct pipe_surface stencil_surf = *pfb->zsbuf;
         stencil_surf.format = PIPE_FORMAT_S8_UINT;
         stencil_surf.texture = separate_stencil;

         fd6_clear_surface(ctx, ring,
               &stencil_surf, pfb->width, pfb->height, &value);
      }
   }

   fd6_event_write(batch, ring, PC_CCU_FLUSH_COLOR_TS, true);
}

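/*
 * Allocate the tessellation factor and parameter BOs for this batch, program
 * PC_TESSFACTOR_ADDR, and write their addresses into the batch's
 * tess_addrs_constobj ringbuffer (rewound and refilled here).
 */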
static void
setup_tess_buffers(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
   struct fd_context *ctx = batch->ctx;

   batch->tessfactor_bo = fd_bo_new(ctx->screen->dev,
         batch->tessfactor_size,
         DRM_FREEDRENO_GEM_TYPE_KMEM, "tessfactor");

   batch->tessparam_bo = fd_bo_new(ctx->screen->dev,
         batch->tessparam_size,
         DRM_FREEDRENO_GEM_TYPE_KMEM, "tessparam");

   OUT_PKT4(ring, REG_A6XX_PC_TESSFACTOR_ADDR_LO, 2);
   OUT_RELOC(ring, batch->tessfactor_bo, 0, 0, 0);

   batch->tess_addrs_constobj->cur = batch->tess_addrs_constobj->start;
   OUT_RELOC(batch->tess_addrs_constobj, batch->tessparam_bo, 0, 0, 0);
   OUT_RELOC(batch->tess_addrs_constobj, batch->tessfactor_bo, 0, 0, 0);
}

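/*
 * Setup for sysmem (bypass) rendering: no tiling, so the scissor and window
 * offset cover the whole framebuffer, the bin size is set to bypass mode,
 * clears go through the 2D blitter, and the CCU is pointed at the bypass
 * offset.
 */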
static void
fd6_emit_sysmem_prep(struct fd_batch *batch)
{
   struct fd_ringbuffer *ring = batch->gmem;
   struct fd_screen *screen = batch->ctx->screen;

   fd6_emit_restore(batch, ring);
   fd6_emit_lrz_flush(ring);

   if (batch->prologue) {
      fd_log(batch, "START PROLOGUE");
      fd6_emit_ib(ring, batch->prologue);
      fd_log(batch, "END PROLOGUE");
   }

   /* remaining setup below here does not apply to blit/compute: */
   if (batch->nondraw)
      return;

   struct pipe_framebuffer_state *pfb = &batch->framebuffer;

   if (pfb->width > 0 && pfb->height > 0)
      set_scissor(ring, 0, 0, pfb->width - 1, pfb->height - 1);
   else
      set_scissor(ring, 0, 0, 0, 0);

   set_window_offset(ring, 0, 0);

   set_bin_size(ring, 0, 0, 0xc00000); /* 0xc00000 = BYPASS? */

   emit_sysmem_clears(batch, ring);

   emit_marker6(ring, 7);
   OUT_PKT7(ring, CP_SET_MARKER, 1);
   OUT_RING(ring, A6XX_CP_SET_MARKER_0_MODE(RM6_BYPASS));
   emit_marker6(ring, 7);

   if (batch->tessellation)
      setup_tess_buffers(batch, ring);

   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   OUT_RING(ring, 0x0);

   /* blob controls "local" in IB2, but I think that is not required */
   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_LOCAL, 1);
   OUT_RING(ring, 0x1);

   fd6_event_write(batch, ring, PC_CCU_INVALIDATE_COLOR, false);
   fd6_cache_inv(batch, ring);

   fd_wfi(batch, ring);
   OUT_REG(ring, A6XX_RB_CCU_CNTL(.offset = screen->info.a6xx.ccu_offset_bypass));

   /* enable stream-out, with sysmem there is only one pass: */
   OUT_REG(ring, A6XX_VPC_SO_DISABLE(false));

   OUT_PKT7(ring, CP_SET_VISIBILITY_OVERRIDE, 1);
   OUT_RING(ring, 0x1);

   emit_zs(ring, pfb->zsbuf, NULL);
   emit_mrt(ring, pfb, NULL);
   emit_msaa(ring, pfb->samples);

   update_render_cntl(batch, pfb, false);
}

static void
fd6_emit_sysmem_fini(struct fd_batch *batch)
{
   struct fd_ringbuffer *ring = batch->gmem;

   if (batch->epilogue)
      fd6_emit_ib(batch->gmem, batch->epilogue);

   OUT_PKT7(ring, CP_SKIP_IB2_ENABLE_GLOBAL, 1);
   OUT_RING(ring, 0x0);

   fd6_emit_lrz_flush(ring);

   fd6_event_write(batch, ring, PC_CCU_FLUSH_COLOR_TS, true);
   fd6_event_write(batch, ring, PC_CCU_FLUSH_DEPTH_TS, true);
}

void
fd6_gmem_init(struct pipe_context *pctx)
{
   struct fd_context *ctx = fd_context(pctx);

   ctx->emit_tile_init = fd6_emit_tile_init;
   ctx->emit_tile_prep = fd6_emit_tile_prep;
   ctx->emit_tile_mem2gmem = fd6_emit_tile_mem2gmem;
   ctx->emit_tile_renderprep = fd6_emit_tile_renderprep;
   ctx->emit_tile = fd6_emit_tile;
   ctx->emit_tile_gmem2mem = fd6_emit_tile_gmem2mem;
   ctx->emit_tile_fini = fd6_emit_tile_fini;
   ctx->emit_sysmem_prep = fd6_emit_sysmem_prep;
   ctx->emit_sysmem_fini = fd6_emit_sysmem_fini;
}