/*
 * Copyright © 2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/format/u_format.h"
#include "v3d_context.h"
#include "broadcom/common/v3d_tiling.h"
#include "broadcom/common/v3d_macros.h"
#include "broadcom/cle/v3dx_pack.h"

#define PIPE_CLEAR_COLOR_BUFFERS (PIPE_CLEAR_COLOR0 | \
                                  PIPE_CLEAR_COLOR1 | \
                                  PIPE_CLEAR_COLOR2 | \
                                  PIPE_CLEAR_COLOR3)

#define PIPE_FIRST_COLOR_BUFFER_BIT (ffs(PIPE_CLEAR_COLOR0) - 1)

/* The HW queues up the load until the tile coordinates show up, but can only
 * track one at a time. If we need to do more than one load, then we need to
 * flush out the previous load by emitting the tile coordinates and doing a
 * dummy store.
 */
static void
flush_last_load(struct v3d_cl *cl)
{
        if (V3D_VERSION >= 40)
                return;

        cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);
        cl_emit(cl, STORE_TILE_BUFFER_GENERAL, store) {
                store.buffer_to_store = NONE;
        }
}

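/* Emits a LOAD_TILE_BUFFER_GENERAL packet to load one buffer (a color render
 * target, Z, stencil, or packed Z/stencil) of the given layer of psurf into
 * the TLB, and clears its bit from *loads_pending. If more loads remain, the
 * previously queued load is flushed (a no-op on V3D 4.x and later).
 */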
static void
load_general(struct v3d_cl *cl, struct pipe_surface *psurf, int buffer,
             int layer, uint32_t pipe_bit, uint32_t *loads_pending)
{
        struct v3d_surface *surf = v3d_surface(psurf);
        bool separate_stencil = surf->separate_stencil && buffer == STENCIL;
        if (separate_stencil) {
                psurf = surf->separate_stencil;
                surf = v3d_surface(psurf);
        }

        struct v3d_resource *rsc = v3d_resource(psurf->texture);

        uint32_t layer_offset =
                v3d_layer_offset(&rsc->base, psurf->u.tex.level,
                                 psurf->u.tex.first_layer + layer);
        cl_emit(cl, LOAD_TILE_BUFFER_GENERAL, load) {
                load.buffer_to_load = buffer;
                load.address = cl_address(rsc->bo, layer_offset);

#if V3D_VERSION >= 40
                load.memory_format = surf->tiling;
                if (separate_stencil)
                        load.input_image_format = V3D_OUTPUT_IMAGE_FORMAT_S8;
                else
                        load.input_image_format = surf->format;
                load.r_b_swap = surf->swap_rb;
                load.force_alpha_1 = util_format_has_alpha1(psurf->format);
                if (surf->tiling == V3D_TILING_UIF_NO_XOR ||
                    surf->tiling == V3D_TILING_UIF_XOR) {
                        load.height_in_ub_or_stride =
                                surf->padded_height_of_output_image_in_uif_blocks;
                } else if (surf->tiling == V3D_TILING_RASTER) {
                        struct v3d_resource_slice *slice =
                                &rsc->slices[psurf->u.tex.level];
                        load.height_in_ub_or_stride = slice->stride;
                }

                if (psurf->texture->nr_samples > 1)
                        load.decimate_mode = V3D_DECIMATE_MODE_ALL_SAMPLES;
                else
                        load.decimate_mode = V3D_DECIMATE_MODE_SAMPLE_0;

#else /* V3D_VERSION < 40 */
                /* Can't do raw ZSTENCIL loads -- need to load/store them to
                 * separate buffers for Z and stencil.
                 */
                assert(buffer != ZSTENCIL);
                load.raw_mode = true;
                load.padded_height_of_output_image_in_uif_blocks =
                        surf->padded_height_of_output_image_in_uif_blocks;
#endif /* V3D_VERSION < 40 */
        }

        *loads_pending &= ~pipe_bit;
        if (*loads_pending)
                flush_last_load(cl);
}

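/* Emits a STORE_TILE_BUFFER_GENERAL packet to write one buffer of the given
 * layer of psurf back to memory, marking the resource as written and
 * clearing its bit from *stores_pending. On V3D 3.x, TLB clears are only
 * flagged on the last store, and a TILE_COORDINATES_IMPLICIT packet is
 * emitted between consecutive stores.
 */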
static void
store_general(struct v3d_job *job,
              struct v3d_cl *cl, struct pipe_surface *psurf,
              int layer, int buffer, int pipe_bit,
              uint32_t *stores_pending, bool general_color_clear,
              bool resolve_4x)
{
        struct v3d_surface *surf = v3d_surface(psurf);
        bool separate_stencil = surf->separate_stencil && buffer == STENCIL;
        if (separate_stencil) {
                psurf = surf->separate_stencil;
                surf = v3d_surface(psurf);
        }

        *stores_pending &= ~pipe_bit;
        bool last_store = !(*stores_pending);

        struct v3d_resource *rsc = v3d_resource(psurf->texture);

        rsc->writes++;

        uint32_t layer_offset =
                v3d_layer_offset(&rsc->base, psurf->u.tex.level,
                                 psurf->u.tex.first_layer + layer);
        cl_emit(cl, STORE_TILE_BUFFER_GENERAL, store) {
                store.buffer_to_store = buffer;
                store.address = cl_address(rsc->bo, layer_offset);

#if V3D_VERSION >= 40
                store.clear_buffer_being_stored = false;

                if (separate_stencil)
                        store.output_image_format = V3D_OUTPUT_IMAGE_FORMAT_S8;
                else
                        store.output_image_format = surf->format;

                store.r_b_swap = surf->swap_rb;
                store.memory_format = surf->tiling;

                if (surf->tiling == V3D_TILING_UIF_NO_XOR ||
                    surf->tiling == V3D_TILING_UIF_XOR) {
                        store.height_in_ub_or_stride =
                                surf->padded_height_of_output_image_in_uif_blocks;
                } else if (surf->tiling == V3D_TILING_RASTER) {
                        struct v3d_resource_slice *slice =
                                &rsc->slices[psurf->u.tex.level];
                        store.height_in_ub_or_stride = slice->stride;
                }

                assert(!resolve_4x || job->bbuf);
                if (psurf->texture->nr_samples > 1)
                        store.decimate_mode = V3D_DECIMATE_MODE_ALL_SAMPLES;
                else if (resolve_4x && job->bbuf->texture->nr_samples > 1)
                        store.decimate_mode = V3D_DECIMATE_MODE_4X;
                else
                        store.decimate_mode = V3D_DECIMATE_MODE_SAMPLE_0;

#else /* V3D_VERSION < 40 */
                /* Can't do raw ZSTENCIL stores -- need to load/store them to
                 * separate buffers for Z and stencil.
                 */
                assert(buffer != ZSTENCIL);
                store.raw_mode = true;
                if (!last_store) {
                        store.disable_color_buffers_clear_on_write = true;
                        store.disable_z_buffer_clear_on_write = true;
                        store.disable_stencil_buffer_clear_on_write = true;
                } else {
                        store.disable_color_buffers_clear_on_write =
                                !(((pipe_bit & PIPE_CLEAR_COLOR_BUFFERS) &&
                                   general_color_clear &&
                                   (job->clear & pipe_bit)));
                        store.disable_z_buffer_clear_on_write =
                                !(job->clear & PIPE_CLEAR_DEPTH);
                        store.disable_stencil_buffer_clear_on_write =
                                !(job->clear & PIPE_CLEAR_STENCIL);
                }
                store.padded_height_of_output_image_in_uif_blocks =
                        surf->padded_height_of_output_image_in_uif_blocks;
#endif /* V3D_VERSION < 40 */
        }

        /* There must be a TILE_COORDINATES_IMPLICIT between each store. */
        if (V3D_VERSION < 40 && !last_store) {
                cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);
        }
}

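/* Maps the depth/stencil bits of a PIPE_CLEAR_* mask to the corresponding
 * TLB buffer enum (Z, STENCIL, ZSTENCIL, or NONE).
 */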
static int
zs_buffer_from_pipe_bits(int pipe_clear_bits)
{
        switch (pipe_clear_bits & PIPE_CLEAR_DEPTHSTENCIL) {
        case PIPE_CLEAR_DEPTHSTENCIL:
                return ZSTENCIL;
        case PIPE_CLEAR_DEPTH:
                return Z;
        case PIPE_CLEAR_STENCIL:
                return STENCIL;
        default:
                return NONE;
        }
}

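/* Emits the per-tile TLB loads for one layer: a general load for each color
 * render target and depth/stencil aspect whose previous contents are needed
 * (or the blit source when blitting), followed by END_OF_LOADS on V3D 4.x or
 * the legacy reload packet on 3.x for anything still pending.
 */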
static void
v3d_rcl_emit_loads(struct v3d_job *job, struct v3d_cl *cl, int layer)
{
        /* When blitting, no color or zs buffer is loaded; instead the blit
         * source buffer is loaded for the aspects that we are going to blit.
         */
        assert(!job->bbuf || job->load == 0);
        assert(!job->bbuf || job->nr_cbufs <= 1);
        assert(!job->bbuf || V3D_VERSION >= 40);

        uint32_t loads_pending = job->bbuf ? job->store : job->load;

        for (int i = 0; i < job->nr_cbufs; i++) {
                uint32_t bit = PIPE_CLEAR_COLOR0 << i;
                if (!(loads_pending & bit))
                        continue;

                struct pipe_surface *psurf = job->bbuf ? job->bbuf : job->cbufs[i];
                assert(!job->bbuf || i == 0);

                if (!psurf || (V3D_VERSION < 40 &&
                               psurf->texture->nr_samples <= 1)) {
                        continue;
                }

                load_general(cl, psurf, RENDER_TARGET_0 + i, layer,
                             bit, &loads_pending);
        }

        if ((loads_pending & PIPE_CLEAR_DEPTHSTENCIL) &&
            (V3D_VERSION >= 40 ||
             (job->zsbuf && job->zsbuf->texture->nr_samples > 1))) {
                assert(!job->early_zs_clear);
                struct pipe_surface *src = job->bbuf ? job->bbuf : job->zsbuf;
                struct v3d_resource *rsc = v3d_resource(src->texture);

                if (rsc->separate_stencil &&
                    (loads_pending & PIPE_CLEAR_STENCIL)) {
                        load_general(cl, src,
                                     STENCIL, layer,
                                     PIPE_CLEAR_STENCIL,
                                     &loads_pending);
                }

                if (loads_pending & PIPE_CLEAR_DEPTHSTENCIL) {
                        load_general(cl, src,
                                     zs_buffer_from_pipe_bits(loads_pending),
                                     layer,
                                     loads_pending & PIPE_CLEAR_DEPTHSTENCIL,
                                     &loads_pending);
                }
        }

#if V3D_VERSION < 40
        /* The initial reload will be queued until we get the
         * tile coordinates.
         */
        if (loads_pending) {
                cl_emit(cl, RELOAD_TILE_COLOR_BUFFER, load) {
                        load.disable_color_buffer_load =
                                (~loads_pending &
                                 PIPE_CLEAR_COLOR_BUFFERS) >>
                                PIPE_FIRST_COLOR_BUFFER_BIT;
                        load.enable_z_load =
                                loads_pending & PIPE_CLEAR_DEPTH;
                        load.enable_stencil_load =
                                loads_pending & PIPE_CLEAR_STENCIL;
                }
        }
#else /* V3D_VERSION >= 40 */
        assert(!loads_pending);
        cl_emit(cl, END_OF_LOADS, end);
#endif
}

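/* Emits the per-tile TLB stores for one layer. On V3D 4.x every stored
 * buffer uses a general store, followed by CLEAR_TILE_BUFFERS when the job
 * has TLB clears (or a dummy store when nothing is stored at all). On V3D
 * 3.3 general stores are only used for raw MSAA stores; remaining buffers go
 * through the multisample-resolved store packet, and clears are flagged on
 * whichever store packet runs last.
 */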
static void
v3d_rcl_emit_stores(struct v3d_job *job, struct v3d_cl *cl, int layer)
{
#if V3D_VERSION < 40
        UNUSED bool needs_color_clear = job->clear & PIPE_CLEAR_COLOR_BUFFERS;
        UNUSED bool needs_z_clear = job->clear & PIPE_CLEAR_DEPTH;
        UNUSED bool needs_s_clear = job->clear & PIPE_CLEAR_STENCIL;

        /* For clearing color with a TLB general store on V3D 3.3:
         *
         * - a NONE buffer store clears all TLB color buffers.
         * - a color buffer store clears just the TLB color buffer being stored.
         * - a Z/S buffer store may not clear the TLB color buffers.
         *
         * And on V3D 4.1, we only have one flag for "clear the buffer being
         * stored" in the general packet, and a separate packet to clear all
         * color TLB buffers.
         *
         * As a result, we only bother flagging TLB color clears in a general
         * packet when we don't have to emit a separate packet to clear all
         * TLB color buffers.
         */
        bool general_color_clear = (needs_color_clear &&
                                    (job->clear & PIPE_CLEAR_COLOR_BUFFERS) ==
                                    (job->store & PIPE_CLEAR_COLOR_BUFFERS));
#else
        bool general_color_clear = false;
#endif

        uint32_t stores_pending = job->store;

        /* For V3D 4.1, use general stores for all TLB stores.
         *
         * For V3D 3.3, we only use general stores to do raw stores for any
         * MSAA surfaces. These output UIF tiled images where each 4x MSAA
         * pixel is a 2x2 quad, and the format will be that of the
         * internal_type/internal_bpp, rather than the format from GL's
         * perspective. Non-MSAA surfaces will use
         * STORE_MULTI_SAMPLE_RESOLVED_TILE_COLOR_BUFFER_EXTENDED.
         */
        assert(!job->bbuf || job->nr_cbufs <= 1);
        for (int i = 0; i < job->nr_cbufs; i++) {
                uint32_t bit = PIPE_CLEAR_COLOR0 << i;
                if (!(job->store & bit))
                        continue;

                struct pipe_surface *psurf = job->cbufs[i];
                if (!psurf ||
                    (V3D_VERSION < 40 && psurf->texture->nr_samples <= 1)) {
                        continue;
                }

                store_general(job, cl, psurf, layer, RENDER_TARGET_0 + i, bit,
                              &stores_pending, general_color_clear, job->bbuf);
        }

        if (job->store & PIPE_CLEAR_DEPTHSTENCIL && job->zsbuf &&
            !(V3D_VERSION < 40 && job->zsbuf->texture->nr_samples <= 1)) {
                assert(!job->early_zs_clear);
                struct v3d_resource *rsc = v3d_resource(job->zsbuf->texture);
                if (rsc->separate_stencil) {
                        if (job->store & PIPE_CLEAR_DEPTH) {
                                store_general(job, cl, job->zsbuf, layer,
                                              Z, PIPE_CLEAR_DEPTH,
                                              &stores_pending,
                                              general_color_clear,
                                              false);
                        }

                        if (job->store & PIPE_CLEAR_STENCIL) {
                                store_general(job, cl, job->zsbuf, layer,
                                              STENCIL, PIPE_CLEAR_STENCIL,
                                              &stores_pending,
                                              general_color_clear,
                                              false);
                        }
                } else {
                        store_general(job, cl, job->zsbuf, layer,
                                      zs_buffer_from_pipe_bits(job->store),
                                      job->store & PIPE_CLEAR_DEPTHSTENCIL,
                                      &stores_pending, general_color_clear,
                                      false);
                }
        }

#if V3D_VERSION < 40
        if (stores_pending) {
                cl_emit(cl, STORE_MULTI_SAMPLE_RESOLVED_TILE_COLOR_BUFFER_EXTENDED, store) {
                        store.disable_color_buffer_write =
                                (~stores_pending >>
                                 PIPE_FIRST_COLOR_BUFFER_BIT) & 0xf;
                        store.enable_z_write = stores_pending & PIPE_CLEAR_DEPTH;
                        store.enable_stencil_write = stores_pending & PIPE_CLEAR_STENCIL;

                        /* Note that when set this will clear all of the color
                         * buffers.
                         */
                        store.disable_color_buffers_clear_on_write =
                                !needs_color_clear;
                        store.disable_z_buffer_clear_on_write =
                                !needs_z_clear;
                        store.disable_stencil_buffer_clear_on_write =
                                !needs_s_clear;
                };
        } else if (needs_color_clear && !general_color_clear) {
                /* If we didn't do our color clears in the general packet,
                 * then emit a packet to clear all the TLB color buffers now.
                 */
                cl_emit(cl, STORE_TILE_BUFFER_GENERAL, store) {
                        store.buffer_to_store = NONE;
                }
        }
#else /* V3D_VERSION >= 40 */
        /* If we're emitting an RCL with GL_ARB_framebuffer_no_attachments,
         * we still need to emit some sort of store.
         */
        if (!job->store) {
                cl_emit(cl, STORE_TILE_BUFFER_GENERAL, store) {
                        store.buffer_to_store = NONE;
                }
        }

        assert(!stores_pending);

        /* GFXH-1461/GFXH-1689: The per-buffer store command's clear
         * buffer bit is broken for depth/stencil. In addition, the
         * clear packet's Z/S bit is broken, but the RTs bit ends up
         * clearing Z/S.
         */
        if (job->clear) {
                cl_emit(cl, CLEAR_TILE_BUFFERS, clear) {
                        clear.clear_z_stencil_buffer = !job->early_zs_clear;
                        clear.clear_all_render_targets = true;
                }
        }
#endif /* V3D_VERSION >= 40 */
}

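/* Builds the generic tile list that every supertile's tile list branches
 * into: the TLB loads, a branch to the binner-generated per-tile primitive
 * lists, then the TLB stores, ending with a return-from-sub-list.
 */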
static void
v3d_rcl_emit_generic_per_tile_list(struct v3d_job *job, int layer)
{
        /* Emit the generic list in our indirect state -- the rcl will just
         * have pointers into it.
         */
        struct v3d_cl *cl = &job->indirect;
        v3d_cl_ensure_space(cl, 200, 1);
        struct v3d_cl_reloc tile_list_start = cl_get_address(cl);

        if (V3D_VERSION >= 40) {
                /* V3D 4.x only requires a single tile coordinates packet, and
                 * END_OF_LOADS switches us between loading and rendering.
                 */
                cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);
        }

        v3d_rcl_emit_loads(job, cl, layer);

        if (V3D_VERSION < 40) {
                /* Tile Coordinates triggers the last reload and sets where
                 * the stores go. There must be one per store packet.
                 */
                cl_emit(cl, TILE_COORDINATES_IMPLICIT, coords);
        }

        /* The binner starts out writing tiles assuming that the initial mode
         * is triangles, so make sure that's the case.
         */
        cl_emit(cl, PRIM_LIST_FORMAT, fmt) {
                fmt.primitive_type = LIST_TRIANGLES;
        }

#if V3D_VERSION >= 41
        /* The PTB assumes this value is 0, but the HW won't reset it on its
         * own, so set it explicitly.
         */
        cl_emit(cl, SET_INSTANCEID, set) {
                set.instance_id = 0;
        }
#endif

        cl_emit(cl, BRANCH_TO_IMPLICIT_TILE_LIST, branch);

        v3d_rcl_emit_stores(job, cl, layer);

#if V3D_VERSION >= 40
        cl_emit(cl, END_OF_TILE_MARKER, end);
#endif

        cl_emit(cl, RETURN_FROM_SUB_LIST, ret);

        cl_emit(&job->rcl, START_ADDRESS_OF_GENERIC_TILE_LIST, branch) {
                branch.start = tile_list_start;
                branch.end = cl_get_address(cl);
        }
}

#if V3D_VERSION >= 40
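/* Fills in the per-render-target internal bpp/type/clamp fields of the V3D
 * 4.x render target config packet from the job's color buffer state, taking
 * the blit source's internal bpp into account when blitting.
 */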
static void
v3d_setup_render_target(struct v3d_job *job, int cbuf,
                        uint32_t *rt_bpp, uint32_t *rt_type, uint32_t *rt_clamp)
{
        if (!job->cbufs[cbuf])
                return;

        struct v3d_surface *surf = v3d_surface(job->cbufs[cbuf]);
        *rt_bpp = surf->internal_bpp;
        if (job->bbuf) {
                struct v3d_surface *bsurf = v3d_surface(job->bbuf);
                *rt_bpp = MAX2(*rt_bpp, bsurf->internal_bpp);
        }
        *rt_type = surf->internal_type;
        *rt_clamp = V3D_RENDER_TARGET_CLAMP_NONE;
}

#else /* V3D_VERSION < 40 */

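/* Emits the V3D 3.x depth/stencil rendering mode config packet for either
 * the combined Z/stencil surface or a separate stencil surface, and marks
 * the resource written if the job stores that aspect.
 */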
static void
v3d_emit_z_stencil_config(struct v3d_job *job, struct v3d_surface *surf,
                          struct v3d_resource *rsc, bool is_separate_stencil)
{
        cl_emit(&job->rcl, TILE_RENDERING_MODE_CFG_Z_STENCIL, zs) {
                zs.address = cl_address(rsc->bo, surf->offset);

                if (!is_separate_stencil) {
                        zs.internal_type = surf->internal_type;
                        zs.output_image_format = surf->format;
                } else {
                        zs.z_stencil_id = 1; /* Separate stencil */
                }

                zs.padded_height_of_output_image_in_uif_blocks =
                        surf->padded_height_of_output_image_in_uif_blocks;

                assert(surf->tiling != V3D_TILING_RASTER);
                zs.memory_format = surf->tiling;
        }

        if (job->store & (is_separate_stencil ?
                          PIPE_CLEAR_STENCIL :
                          PIPE_CLEAR_DEPTHSTENCIL)) {
                rsc->writes++;
        }
}
#endif /* V3D_VERSION < 40 */

#define div_round_up(a, b) (((a) + (b) - 1) / (b))

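/* Returns whether the supertile at grid position (x, y), with supertiles of
 * w x h pixels, overlaps any of the job's scissor rectangles (always true
 * when scissoring is disabled or no rectangles are recorded).
 */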
static bool
supertile_in_job_scissors(struct v3d_job *job,
                          uint32_t x, uint32_t y, uint32_t w, uint32_t h)
{
        if (job->scissor.disabled || job->scissor.count == 0)
                return true;

        const uint32_t min_x = x * w;
        const uint32_t min_y = y * h;
        const uint32_t max_x = min_x + w - 1;
        const uint32_t max_y = min_y + h - 1;

        for (uint32_t i = 0; i < job->scissor.count; i++) {
                const uint32_t min_s_x = job->scissor.rects[i].min_x;
                const uint32_t min_s_y = job->scissor.rects[i].min_y;
                const uint32_t max_s_x = job->scissor.rects[i].max_x;
                const uint32_t max_s_y = job->scissor.rects[i].max_y;

                if (max_x < min_s_x || min_x > max_s_x ||
                    max_y < min_s_y || min_y > max_s_y) {
                        continue;
                }

                return true;
        }

        return false;
}

#if V3D_VERSION >= 40
static inline bool
do_double_initial_tile_clear(const struct v3d_job *job)
{
        /* Our rendering code emits an initial clear per layer, unlike the
         * Vulkan driver, which only executes a single initial clear for all
         * layers. This is because in GL we don't use the
         * 'clear_buffer_being_stored' bit when storing tiles, so each layer
         * needs the initial clear. This is also why this helper, unlike the
         * Vulkan version, doesn't check the layer count to decide if double
         * clear for double buffer mode is required.
         */
        return job->double_buffer &&
               (job->draw_tiles_x > 1 || job->draw_tiles_y > 1);
}
#endif

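/* Emits the RCL commands for a single layer: the tile list base and
 * supertile configuration, the initial tile buffer clear sequence (including
 * the GFXH-1742 dummy stores), the generic per-tile list, and finally one
 * SUPERTILE_COORDINATES packet per supertile in the job's drawn and
 * non-scissored area.
 */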
static void
emit_render_layer(struct v3d_job *job, uint32_t layer)
{
        uint32_t supertile_w = 1, supertile_h = 1;

        /* If doing multicore binning, we would need to initialize each
         * core's tile list here.
         */
        uint32_t tile_alloc_offset =
                layer * job->draw_tiles_x * job->draw_tiles_y * 64;
        cl_emit(&job->rcl, MULTICORE_RENDERING_TILE_LIST_SET_BASE, list) {
                list.address = cl_address(job->tile_alloc, tile_alloc_offset);
        }

        cl_emit(&job->rcl, MULTICORE_RENDERING_SUPERTILE_CFG, config) {
                uint32_t frame_w_in_supertiles, frame_h_in_supertiles;
                const uint32_t max_supertiles = 256;

                /* Size up our supertiles until we get under the limit. */
                for (;;) {
                        frame_w_in_supertiles = div_round_up(job->draw_tiles_x,
                                                             supertile_w);
                        frame_h_in_supertiles = div_round_up(job->draw_tiles_y,
                                                             supertile_h);
                        if (frame_w_in_supertiles *
                            frame_h_in_supertiles < max_supertiles) {
                                break;
                        }

                        if (supertile_w < supertile_h)
                                supertile_w++;
                        else
                                supertile_h++;
                }

                config.number_of_bin_tile_lists = 1;
                config.total_frame_width_in_tiles = job->draw_tiles_x;
                config.total_frame_height_in_tiles = job->draw_tiles_y;

                config.supertile_width_in_tiles = supertile_w;
                config.supertile_height_in_tiles = supertile_h;

                config.total_frame_width_in_supertiles = frame_w_in_supertiles;
                config.total_frame_height_in_supertiles = frame_h_in_supertiles;
        }

        /* Start by clearing the tile buffer. */
        cl_emit(&job->rcl, TILE_COORDINATES, coords) {
                coords.tile_column_number = 0;
                coords.tile_row_number = 0;
        }

        /* Emit an initial clear of the tile buffers. This is necessary
         * for any buffers that should be cleared (since clearing
         * normally happens at the *end* of the generic tile list), but
         * it's also nice to clear everything so the first tile doesn't
         * inherit any contents from some previous frame.
         *
         * Also, implement the GFXH-1742 workaround. There's a race in
         * the HW between the RCL updating the TLB's internal type/size
         * and the spawning of the QPU instances using the TLB's current
         * internal type/size. To make sure the QPUs get the right
         * state, we need 1 dummy store in between internal type/size
         * changes on V3D 3.x, and 2 dummy stores on 4.x.
         */
#if V3D_VERSION < 40
        cl_emit(&job->rcl, STORE_TILE_BUFFER_GENERAL, store) {
                store.buffer_to_store = NONE;
        }
#else
        for (int i = 0; i < 2; i++) {
                if (i > 0)
                        cl_emit(&job->rcl, TILE_COORDINATES, coords);
                cl_emit(&job->rcl, END_OF_LOADS, end);
                cl_emit(&job->rcl, STORE_TILE_BUFFER_GENERAL, store) {
                        store.buffer_to_store = NONE;
                }
                if (i == 0 || do_double_initial_tile_clear(job)) {
                        cl_emit(&job->rcl, CLEAR_TILE_BUFFERS, clear) {
                                clear.clear_z_stencil_buffer = !job->early_zs_clear;
                                clear.clear_all_render_targets = true;
                        }
                }
                cl_emit(&job->rcl, END_OF_TILE_MARKER, end);
        }
#endif

        cl_emit(&job->rcl, FLUSH_VCD_CACHE, flush);

        v3d_rcl_emit_generic_per_tile_list(job, layer);

        /* XXX perf: We should expose GL_MESA_tile_raster_order to
         * improve X11 performance, but we should use Morton order
         * otherwise to improve cache locality.
         */
        uint32_t supertile_w_in_pixels = job->tile_width * supertile_w;
        uint32_t supertile_h_in_pixels = job->tile_height * supertile_h;
        uint32_t min_x_supertile = job->draw_min_x / supertile_w_in_pixels;
        uint32_t min_y_supertile = job->draw_min_y / supertile_h_in_pixels;

        uint32_t max_x_supertile = 0;
        uint32_t max_y_supertile = 0;
        if (job->draw_max_x != 0 && job->draw_max_y != 0) {
                max_x_supertile = (job->draw_max_x - 1) / supertile_w_in_pixels;
                max_y_supertile = (job->draw_max_y - 1) / supertile_h_in_pixels;
        }

        for (int y = min_y_supertile; y <= max_y_supertile; y++) {
                for (int x = min_x_supertile; x <= max_x_supertile; x++) {
                        if (supertile_in_job_scissors(job, x, y,
                                                      supertile_w_in_pixels,
                                                      supertile_h_in_pixels)) {
                                cl_emit(&job->rcl, SUPERTILE_COORDINATES, coords) {
                                        coords.column_number_in_supertiles = x;
                                        coords.row_number_in_supertiles = y;
                                }
                        }
                }
        }
}

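/* Top-level entry point for building the job's render control list: emits
 * the common and per-render-target rendering mode configuration, the clear
 * values, the tile list initial block size, and then one render pass per
 * layer (at least one, for ARB_framebuffer_no_attachments).
 */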
void
v3dX(emit_rcl)(struct v3d_job *job)
{
        /* The RCL list should be empty. */
        assert(!job->rcl.bo);

        v3d_cl_ensure_space_with_branch(&job->rcl, 200 +
                                        MAX2(job->num_layers, 1) * 256 *
                                        cl_packet_length(SUPERTILE_COORDINATES));
        job->submit.rcl_start = job->rcl.bo->offset;
        v3d_job_add_bo(job, job->rcl.bo);

        /* Common config must be the first TILE_RENDERING_MODE_CFG
         * and Z_STENCIL_CLEAR_VALUES must be last. The ones in between are
         * optional updates to the previous HW state.
         */
        cl_emit(&job->rcl, TILE_RENDERING_MODE_CFG_COMMON, config) {
#if V3D_VERSION < 40
                config.enable_z_store = job->store & PIPE_CLEAR_DEPTH;
                config.enable_stencil_store = job->store & PIPE_CLEAR_STENCIL;
#else /* V3D_VERSION >= 40 */
                if (job->zsbuf) {
                        struct v3d_surface *surf = v3d_surface(job->zsbuf);
                        config.internal_depth_type = surf->internal_type;
                }
#endif /* V3D_VERSION >= 40 */

                if (job->decided_global_ez_enable) {
                        switch (job->first_ez_state) {
                        case V3D_EZ_UNDECIDED:
                        case V3D_EZ_LT_LE:
                                config.early_z_disable = false;
                                config.early_z_test_and_update_direction =
                                        EARLY_Z_DIRECTION_LT_LE;
                                break;
                        case V3D_EZ_GT_GE:
                                config.early_z_disable = false;
                                config.early_z_test_and_update_direction =
                                        EARLY_Z_DIRECTION_GT_GE;
                                break;
                        case V3D_EZ_DISABLED:
                                config.early_z_disable = true;
                        }
                } else {
                        assert(job->draw_calls_queued == 0);
                        config.early_z_disable = true;
                }

#if V3D_VERSION >= 40
                assert(job->zsbuf || config.early_z_disable);

                job->early_zs_clear = (job->clear & PIPE_CLEAR_DEPTHSTENCIL) &&
                        !(job->load & PIPE_CLEAR_DEPTHSTENCIL) &&
                        !(job->store & PIPE_CLEAR_DEPTHSTENCIL);

                config.early_depth_stencil_clear = job->early_zs_clear;
#endif /* V3D_VERSION >= 40 */

                config.image_width_pixels = job->draw_width;
                config.image_height_pixels = job->draw_height;

                config.number_of_render_targets = MAX2(job->nr_cbufs, 1);

                assert(!job->msaa || !job->double_buffer);
                config.multisample_mode_4x = job->msaa;
                config.double_buffer_in_non_ms_mode = job->double_buffer;

                config.maximum_bpp_of_all_render_targets = job->internal_bpp;
        }

        for (int i = 0; i < job->nr_cbufs; i++) {
                struct pipe_surface *psurf = job->cbufs[i];
                if (!psurf)
                        continue;
                struct v3d_surface *surf = v3d_surface(psurf);
                struct v3d_resource *rsc = v3d_resource(psurf->texture);

                UNUSED uint32_t config_pad = 0;
                uint32_t clear_pad = 0;

                /* XXX: Set the pad for raster. */
                if (surf->tiling == V3D_TILING_UIF_NO_XOR ||
                    surf->tiling == V3D_TILING_UIF_XOR) {
                        int uif_block_height = v3d_utile_height(rsc->cpp) * 2;
                        uint32_t implicit_padded_height =
                                (align(job->draw_height, uif_block_height) /
                                 uif_block_height);
                        if (surf->padded_height_of_output_image_in_uif_blocks -
                            implicit_padded_height < 15) {
                                config_pad = (surf->padded_height_of_output_image_in_uif_blocks -
                                              implicit_padded_height);
                        } else {
                                config_pad = 15;
                                clear_pad = surf->padded_height_of_output_image_in_uif_blocks;
                        }
                }

#if V3D_VERSION < 40
                cl_emit(&job->rcl, TILE_RENDERING_MODE_CFG_COLOR, rt) {
                        rt.address = cl_address(rsc->bo, surf->offset);
                        rt.internal_type = surf->internal_type;
                        rt.output_image_format = surf->format;
                        rt.memory_format = surf->tiling;
                        rt.internal_bpp = surf->internal_bpp;
                        rt.render_target_number = i;
                        rt.pad = config_pad;

                        if (job->store & PIPE_CLEAR_COLOR0 << i)
                                rsc->writes++;
                }
#endif /* V3D_VERSION < 40 */

                cl_emit(&job->rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART1,
                        clear) {
                        clear.clear_color_low_32_bits = job->clear_color[i][0];
                        clear.clear_color_next_24_bits = job->clear_color[i][1] & 0xffffff;
                        clear.render_target_number = i;
                };

                if (surf->internal_bpp >= V3D_INTERNAL_BPP_64) {
                        cl_emit(&job->rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART2,
                                clear) {
                                clear.clear_color_mid_low_32_bits =
                                        ((job->clear_color[i][1] >> 24) |
                                         (job->clear_color[i][2] << 8));
                                clear.clear_color_mid_high_24_bits =
                                        ((job->clear_color[i][2] >> 24) |
                                         ((job->clear_color[i][3] & 0xffff) << 8));
                                clear.render_target_number = i;
                        };
                }

                if (surf->internal_bpp >= V3D_INTERNAL_BPP_128 || clear_pad) {
                        cl_emit(&job->rcl, TILE_RENDERING_MODE_CFG_CLEAR_COLORS_PART3,
                                clear) {
                                clear.uif_padded_height_in_uif_blocks = clear_pad;
                                clear.clear_color_high_16_bits = job->clear_color[i][3] >> 16;
                                clear.render_target_number = i;
                        };
                }
        }

#if V3D_VERSION >= 40
        cl_emit(&job->rcl, TILE_RENDERING_MODE_CFG_COLOR, rt) {
                v3d_setup_render_target(job, 0,
                                        &rt.render_target_0_internal_bpp,
                                        &rt.render_target_0_internal_type,
                                        &rt.render_target_0_clamp);
                v3d_setup_render_target(job, 1,
                                        &rt.render_target_1_internal_bpp,
                                        &rt.render_target_1_internal_type,
                                        &rt.render_target_1_clamp);
                v3d_setup_render_target(job, 2,
                                        &rt.render_target_2_internal_bpp,
                                        &rt.render_target_2_internal_type,
                                        &rt.render_target_2_clamp);
                v3d_setup_render_target(job, 3,
                                        &rt.render_target_3_internal_bpp,
                                        &rt.render_target_3_internal_type,
                                        &rt.render_target_3_clamp);
        }
#endif

#if V3D_VERSION < 40
        /* TODO: Don't bother emitting if we don't load/clear Z/S. */
        if (job->zsbuf) {
                struct pipe_surface *psurf = job->zsbuf;
                struct v3d_surface *surf = v3d_surface(psurf);
                struct v3d_resource *rsc = v3d_resource(psurf->texture);

                v3d_emit_z_stencil_config(job, surf, rsc, false);

                /* Emit the separate stencil packet if we have a resource for
                 * it. The HW will only load/store this buffer if the
                 * Z/Stencil config doesn't have stencil in its format.
                 */
                if (surf->separate_stencil) {
                        v3d_emit_z_stencil_config(job,
                                                  v3d_surface(surf->separate_stencil),
                                                  rsc->separate_stencil, true);
                }
        }
#endif /* V3D_VERSION < 40 */

        /* Ends rendering mode config. */
        cl_emit(&job->rcl, TILE_RENDERING_MODE_CFG_ZS_CLEAR_VALUES,
                clear) {
                clear.z_clear_value = job->clear_z;
                clear.stencil_clear_value = job->clear_s;
        };

        /* Always set initial block size before the first branch, which needs
         * to match the value from binning mode config.
         */
        cl_emit(&job->rcl, TILE_LIST_INITIAL_BLOCK_SIZE, init) {
                init.use_auto_chained_tile_lists = true;
                init.size_of_first_block_in_chained_tile_lists =
                        TILE_ALLOCATION_BLOCK_SIZE_64B;
        }

        /* ARB_framebuffer_no_attachments allows rendering to happen even when
         * the framebuffer has no attachments, the idea being that fragment
         * shaders can still do image load/store, ssbo, etc without having to
         * write to actual attachments, so always run at least one iteration
         * of the loop.
         */
        assert(job->num_layers > 0 || (job->load == 0 && job->store == 0));
        for (int layer = 0; layer < MAX2(1, job->num_layers); layer++)
                emit_render_layer(job, layer);

        cl_emit(&job->rcl, END_OF_RENDERING, end);
}