/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/format/u_format.h"
#include "util/u_helpers.h"
#include "util/u_memory.h"
#include "util/u_string.h"
#include "util/u_viewport.h"

#include "freedreno_query_hw.h"
#include "freedreno_resource.h"

#include "fd5_blend.h"
#include "fd5_blitter.h"
#include "fd5_context.h"
#include "fd5_emit.h"
#include "fd5_format.h"
#include "fd5_image.h"
#include "fd5_program.h"
#include "fd5_rasterizer.h"
#include "fd5_screen.h"
#include "fd5_texture.h"
#include "fd5_zsa.h"

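/* ir3_const.h provides the generation-independent const-emit logic, and
 * calls back into the fd5 hooks via the emit_const_user/emit_const_bo
 * defines below:
 */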
#define emit_const_user fd5_emit_const_user
#define emit_const_bo   fd5_emit_const_bo
#include "ir3_const.h"

/* regid:          base const register
 * prsc or dwords: buffer containing constant values
 * sizedwords:     size of const value buffer
 */
static void
fd5_emit_const_user(struct fd_ringbuffer *ring,
                    const struct ir3_shader_variant *v, uint32_t regid,
                    uint32_t sizedwords, const uint32_t *dwords)
{
   emit_const_asserts(ring, v, regid, sizedwords);

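   /* SS4_DIRECT: the const values are inlined in the cmdstream; DST_OFF
    * and NUM_UNIT are in units of vec4, ie. 4 dwords:
    */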
   OUT_PKT7(ring, CP_LOAD_STATE4, 3 + sizedwords);
   OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(regid / 4) |
                     CP_LOAD_STATE4_0_STATE_SRC(SS4_DIRECT) |
                     CP_LOAD_STATE4_0_STATE_BLOCK(fd4_stage2shadersb(v->type)) |
                     CP_LOAD_STATE4_0_NUM_UNIT(sizedwords / 4));
   OUT_RING(ring, CP_LOAD_STATE4_1_EXT_SRC_ADDR(0) |
                     CP_LOAD_STATE4_1_STATE_TYPE(ST4_CONSTANTS));
   OUT_RING(ring, CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI(0));
   for (int i = 0; i < sizedwords; i++)
      OUT_RING(ring, ((uint32_t *)dwords)[i]);
}

static void
fd5_emit_const_bo(struct fd_ringbuffer *ring,
                  const struct ir3_shader_variant *v, uint32_t regid,
                  uint32_t offset, uint32_t sizedwords, struct fd_bo *bo)
{
   uint32_t dst_off = regid / 4;
   assert(dst_off % 4 == 0);
   uint32_t num_unit = sizedwords / 4;
   assert(num_unit % 4 == 0);

   emit_const_asserts(ring, v, regid, sizedwords);

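   /* SS4_INDIRECT: the CP fetches the const values from the BO, so only
    * the packet header and the reloc'd 64b address go in the cmdstream:
    */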
   OUT_PKT7(ring, CP_LOAD_STATE4, 3);
   OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(dst_off) |
                     CP_LOAD_STATE4_0_STATE_SRC(SS4_INDIRECT) |
                     CP_LOAD_STATE4_0_STATE_BLOCK(fd4_stage2shadersb(v->type)) |
                     CP_LOAD_STATE4_0_NUM_UNIT(num_unit));
   OUT_RELOC(ring, bo, offset, CP_LOAD_STATE4_1_STATE_TYPE(ST4_CONSTANTS), 0);
}

static void
fd5_emit_const_ptrs(struct fd_ringbuffer *ring, gl_shader_stage type,
                    uint32_t regid, uint32_t num, struct fd_bo **bos,
                    uint32_t *offsets)
{
   uint32_t anum = align(num, 2);
   uint32_t i;

   debug_assert((regid % 4) == 0);

   OUT_PKT7(ring, CP_LOAD_STATE4, 3 + (2 * anum));
   OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(regid / 4) |
                     CP_LOAD_STATE4_0_STATE_SRC(SS4_DIRECT) |
                     CP_LOAD_STATE4_0_STATE_BLOCK(fd4_stage2shadersb(type)) |
                     CP_LOAD_STATE4_0_NUM_UNIT(anum / 2));
   OUT_RING(ring, CP_LOAD_STATE4_1_EXT_SRC_ADDR(0) |
                     CP_LOAD_STATE4_1_STATE_TYPE(ST4_CONSTANTS));
   OUT_RING(ring, CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI(0));

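   /* Buffer addresses are 64b, ie. two dwords each, and NUM_UNIT above is
    * in vec4s (pairs of pointers), hence aligning num up to 2 and padding
    * the tail entries below:
    */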
   for (i = 0; i < num; i++) {
      if (bos[i]) {
         OUT_RELOC(ring, bos[i], offsets[i], 0, 0);
      } else {
         OUT_RING(ring, 0xbad00000 | (i << 16));
         OUT_RING(ring, 0xbad00000 | (i << 16));
      }
   }

   for (; i < anum; i++) {
      OUT_RING(ring, 0xffffffff);
      OUT_RING(ring, 0xffffffff);
   }
}

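/* a5xx emits consts directly into the draw cmdstream rather than a
 * separate state object, so ir3_const.h's stateobj path is never taken:
 */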
static bool
is_stateobj(struct fd_ringbuffer *ring)
{
   return false;
}

static void
emit_const_ptrs(struct fd_ringbuffer *ring, const struct ir3_shader_variant *v,
                uint32_t dst_offset, uint32_t num, struct fd_bo **bos,
                uint32_t *offsets)
{
   /* TODO inline this */
   assert(dst_offset + num <= v->constlen * 4);
   fd5_emit_const_ptrs(ring, v->type, dst_offset, num, bos, offsets);
}

void
fd5_emit_cs_consts(const struct ir3_shader_variant *v,
                   struct fd_ringbuffer *ring, struct fd_context *ctx,
                   const struct pipe_grid_info *info)
{
   ir3_emit_cs_consts(v, ring, ctx, info);
}

/* Border color layout is diff from a4xx.. if it turns out to be
 * the same as a6xx then move this somewhere common ;-)
 *
 * Entry layout looks like (total size, 0x60 bytes):
 */

struct PACKED bcolor_entry {
   uint32_t fp32[4];
   uint16_t ui16[4];
   int16_t si16[4];

   uint16_t fp16[4];
   uint16_t rgb565;
   uint16_t rgb5a1;
   uint16_t rgba4;
   uint8_t __pad0[2];
   uint8_t ui8[4];
   int8_t si8[4];
   uint32_t rgb10a2;
   uint32_t z24; /* also s8? */

   uint16_t
      srgb[4]; /* appears to duplicate fp16[], but clamped, used for srgb */
   uint8_t __pad1[24];
};

#define FD5_BORDER_COLOR_SIZE 0x60
#define FD5_BORDER_COLOR_UPLOAD_SIZE                                          \
   (2 * PIPE_MAX_SAMPLERS * FD5_BORDER_COLOR_SIZE)

static void
setup_border_colors(struct fd_texture_stateobj *tex,
                    struct bcolor_entry *entries)
{
   unsigned i, j;
   STATIC_ASSERT(sizeof(struct bcolor_entry) == FD5_BORDER_COLOR_SIZE);

   for (i = 0; i < tex->num_samplers; i++) {
      struct bcolor_entry *e = &entries[i];
      struct pipe_sampler_state *sampler = tex->samplers[i];
      union pipe_color_union *bc;

      if (!sampler)
         continue;

      bc = &sampler->border_color;

      /*
       * XXX HACK ALERT XXX
       *
       * The border colors need to be swizzled in a particular
       * format-dependent order. Even though samplers don't know about
       * formats, we can assume that with a GL state tracker, there's a
       * 1:1 correspondence between sampler and texture. Take advantage
       * of that knowledge.
       */
      if ((i >= tex->num_textures) || !tex->textures[i])
         continue;

      enum pipe_format format = tex->textures[i]->format;
      const struct util_format_description *desc =
         util_format_description(format);

      e->rgb565 = 0;
      e->rgb5a1 = 0;
      e->rgba4 = 0;
      e->rgb10a2 = 0;
      e->z24 = 0;

      for (j = 0; j < 4; j++) {
         int c = desc->swizzle[j];
         int cd = c;

         /*
          * HACK: for PIPE_FORMAT_X24S8_UINT we end up w/ the
          * stencil border color value in bc->ui[0] but according
          * to desc->swizzle and desc->channel, the .x component
          * is NONE and the stencil value is in the y component.
          * Meanwhile the hardware wants this in the .x component.
          */
         if ((format == PIPE_FORMAT_X24S8_UINT) ||
             (format == PIPE_FORMAT_X32_S8X24_UINT)) {
            if (j == 0) {
               c = 1;
               cd = 0;
            } else {
               continue;
            }
         }

         if (c >= 4)
            continue;

         if (desc->channel[c].pure_integer) {
            uint16_t clamped;
            switch (desc->channel[c].size) {
            case 2:
               assert(desc->channel[c].type == UTIL_FORMAT_TYPE_UNSIGNED);
               clamped = CLAMP(bc->ui[j], 0, 0x3);
               break;
            case 8:
               if (desc->channel[c].type == UTIL_FORMAT_TYPE_SIGNED)
                  clamped = CLAMP(bc->i[j], -128, 127);
               else
                  clamped = CLAMP(bc->ui[j], 0, 255);
               break;
            case 10:
               assert(desc->channel[c].type == UTIL_FORMAT_TYPE_UNSIGNED);
               clamped = CLAMP(bc->ui[j], 0, 0x3ff);
               break;
            case 16:
               if (desc->channel[c].type == UTIL_FORMAT_TYPE_SIGNED)
                  clamped = CLAMP(bc->i[j], -32768, 32767);
               else
                  clamped = CLAMP(bc->ui[j], 0, 65535);
               break;
            default:
               assert(!"Unexpected bit size");
            case 32:
               clamped = 0;
               break;
            }
            e->fp32[cd] = bc->ui[j];
            e->fp16[cd] = clamped;
         } else {
            float f = bc->f[j];
            float f_u = CLAMP(f, 0, 1);
            float f_s = CLAMP(f, -1, 1);

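            /* Pack the clamped value into each format the hw might sample
             * the border color in, scaling to the range of the unorm/snorm
             * type:
             */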
            e->fp32[c] = fui(f);
            e->fp16[c] = _mesa_float_to_half(f);
            e->srgb[c] = _mesa_float_to_half(f_u);
            e->ui16[c] = f_u * 0xffff;
            e->si16[c] = f_s * 0x7fff;
            e->ui8[c] = f_u * 0xff;
            e->si8[c] = f_s * 0x7f;
            if (c == 1)
               e->rgb565 |= (int)(f_u * 0x3f) << 5;
            else if (c < 3)
               e->rgb565 |= (int)(f_u * 0x1f) << (c ? 11 : 0);
            if (c == 3)
               e->rgb5a1 |= (f_u > 0.5) ? 0x8000 : 0;
            else
               e->rgb5a1 |= (int)(f_u * 0x1f) << (c * 5);
            if (c == 3)
               e->rgb10a2 |= (int)(f_u * 0x3) << 30;
            else
               e->rgb10a2 |= (int)(f_u * 0x3ff) << (c * 10);
            e->rgba4 |= (int)(f_u * 0xf) << (c * 4);
            if (c == 0)
               e->z24 = f_u * 0xffffff;
         }
      }

#ifdef DEBUG
      memset(&e->__pad0, 0, sizeof(e->__pad0));
      memset(&e->__pad1, 0, sizeof(e->__pad1));
#endif
   }
}

static void
emit_border_color(struct fd_context *ctx, struct fd_ringbuffer *ring) assert_dt
{
   struct fd5_context *fd5_ctx = fd5_context(ctx);
   struct bcolor_entry *entries;
   unsigned off;
   void *ptr;

   STATIC_ASSERT(sizeof(struct bcolor_entry) == FD5_BORDER_COLOR_SIZE);

   u_upload_alloc(fd5_ctx->border_color_uploader, 0,
                  FD5_BORDER_COLOR_UPLOAD_SIZE, FD5_BORDER_COLOR_UPLOAD_SIZE,
                  &off, &fd5_ctx->border_color_buf, &ptr);

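   /* The upload holds the VS bcolor table immediately followed by the FS
    * table; A5XX_TEX_SAMP_2_BCOLOR_OFFSET in emit_textures() indexes into
    * the combined table:
    */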
   entries = ptr;

   setup_border_colors(&ctx->tex[PIPE_SHADER_VERTEX], &entries[0]);
   setup_border_colors(&ctx->tex[PIPE_SHADER_FRAGMENT],
                       &entries[ctx->tex[PIPE_SHADER_VERTEX].num_samplers]);

   OUT_PKT4(ring, REG_A5XX_TPL1_TP_BORDER_COLOR_BASE_ADDR_LO, 2);
   OUT_RELOC(ring, fd_resource(fd5_ctx->border_color_buf)->bo, off, 0, 0);

   u_upload_unmap(fd5_ctx->border_color_uploader);
}

static bool
emit_textures(struct fd_context *ctx, struct fd_ringbuffer *ring,
              enum a4xx_state_block sb,
              struct fd_texture_stateobj *tex) assert_dt
{
   bool needs_border = false;
   unsigned bcolor_offset =
      (sb == SB4_FS_TEX) ? ctx->tex[PIPE_SHADER_VERTEX].num_samplers : 0;
   unsigned i;

   if (tex->num_samplers > 0) {
      /* output sampler state: */
      OUT_PKT7(ring, CP_LOAD_STATE4, 3 + (4 * tex->num_samplers));
      OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(0) |
                        CP_LOAD_STATE4_0_STATE_SRC(SS4_DIRECT) |
                        CP_LOAD_STATE4_0_STATE_BLOCK(sb) |
                        CP_LOAD_STATE4_0_NUM_UNIT(tex->num_samplers));
      OUT_RING(ring, CP_LOAD_STATE4_1_STATE_TYPE(ST4_SHADER) |
                        CP_LOAD_STATE4_1_EXT_SRC_ADDR(0));
      OUT_RING(ring, CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI(0));
      for (i = 0; i < tex->num_samplers; i++) {
         static const struct fd5_sampler_stateobj dummy_sampler = {};
         const struct fd5_sampler_stateobj *sampler =
            tex->samplers[i] ? fd5_sampler_stateobj(tex->samplers[i])
                             : &dummy_sampler;
         OUT_RING(ring, sampler->texsamp0);
         OUT_RING(ring, sampler->texsamp1);
         OUT_RING(ring, sampler->texsamp2 |
                           A5XX_TEX_SAMP_2_BCOLOR_OFFSET(bcolor_offset + i));
         OUT_RING(ring, sampler->texsamp3);

         needs_border |= sampler->needs_border;
      }
   }

   if (tex->num_textures > 0) {
      unsigned num_textures = tex->num_textures;

      /* emit texture state: */
      OUT_PKT7(ring, CP_LOAD_STATE4, 3 + (12 * num_textures));
      OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(0) |
                        CP_LOAD_STATE4_0_STATE_SRC(SS4_DIRECT) |
                        CP_LOAD_STATE4_0_STATE_BLOCK(sb) |
                        CP_LOAD_STATE4_0_NUM_UNIT(num_textures));
      OUT_RING(ring, CP_LOAD_STATE4_1_STATE_TYPE(ST4_CONSTANTS) |
                        CP_LOAD_STATE4_1_EXT_SRC_ADDR(0));
      OUT_RING(ring, CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI(0));
      for (i = 0; i < tex->num_textures; i++) {
         static const struct fd5_pipe_sampler_view dummy_view = {};
         const struct fd5_pipe_sampler_view *view =
            tex->textures[i] ? fd5_pipe_sampler_view(tex->textures[i])
                             : &dummy_view;
         enum a5xx_tile_mode tile_mode = TILE5_LINEAR;

         if (view->base.texture)
            tile_mode = fd_resource(view->base.texture)->layout.tile_mode;

         OUT_RING(ring,
                  view->texconst0 | A5XX_TEX_CONST_0_TILE_MODE(tile_mode));
         OUT_RING(ring, view->texconst1);
         OUT_RING(ring, view->texconst2);
         OUT_RING(ring, view->texconst3);
         if (view->base.texture) {
            struct fd_resource *rsc = fd_resource(view->base.texture);
            if (view->base.format == PIPE_FORMAT_X32_S8X24_UINT)
               rsc = rsc->stencil;
            OUT_RELOC(ring, rsc->bo, view->offset,
                      (uint64_t)view->texconst5 << 32, 0);
         } else {
            OUT_RING(ring, 0x00000000);
            OUT_RING(ring, view->texconst5);
         }
         OUT_RING(ring, view->texconst6);
         OUT_RING(ring, view->texconst7);
         OUT_RING(ring, view->texconst8);
         OUT_RING(ring, view->texconst9);
         OUT_RING(ring, view->texconst10);
         OUT_RING(ring, view->texconst11);
      }
   }

   return needs_border;
}

static void
emit_ssbos(struct fd_context *ctx, struct fd_ringbuffer *ring,
           enum a4xx_state_block sb, struct fd_shaderbuf_stateobj *so,
           const struct ir3_shader_variant *v)
{
   unsigned count = util_last_bit(so->enabled_mask);

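   /* Two CP_LOAD_STATE4 packets: the first uploads the SSBO sizes
    * (ST4_CONSTANTS), the second the buffer base addresses (ST4_UBO):
    */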
   OUT_PKT7(ring, CP_LOAD_STATE4, 3 + 2 * count);
   OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(0) |
                     CP_LOAD_STATE4_0_STATE_SRC(SS4_DIRECT) |
                     CP_LOAD_STATE4_0_STATE_BLOCK(sb) |
                     CP_LOAD_STATE4_0_NUM_UNIT(count));
   OUT_RING(ring, CP_LOAD_STATE4_1_STATE_TYPE(ST4_CONSTANTS) |
                     CP_LOAD_STATE4_1_EXT_SRC_ADDR(0));
   OUT_RING(ring, CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI(0));

   for (unsigned i = 0; i < count; i++) {
      struct pipe_shader_buffer *buf = &so->sb[i];
      unsigned sz = buf->buffer_size;

      /* Unlike a6xx, SSBO size is in bytes. */
      OUT_RING(ring, A5XX_SSBO_1_0_WIDTH(sz & MASK(16)));
      OUT_RING(ring, A5XX_SSBO_1_1_HEIGHT(sz >> 16));
   }

   OUT_PKT7(ring, CP_LOAD_STATE4, 3 + 2 * count);
   OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(0) |
                     CP_LOAD_STATE4_0_STATE_SRC(SS4_DIRECT) |
                     CP_LOAD_STATE4_0_STATE_BLOCK(sb) |
                     CP_LOAD_STATE4_0_NUM_UNIT(count));
   OUT_RING(ring, CP_LOAD_STATE4_1_STATE_TYPE(ST4_UBO) |
                     CP_LOAD_STATE4_1_EXT_SRC_ADDR(0));
   OUT_RING(ring, CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI(0));
   for (unsigned i = 0; i < count; i++) {
      struct pipe_shader_buffer *buf = &so->sb[i];

      if (buf->buffer) {
         struct fd_resource *rsc = fd_resource(buf->buffer);
         OUT_RELOC(ring, rsc->bo, buf->buffer_offset, 0, 0);
      } else {
         OUT_RING(ring, 0x00000000);
         OUT_RING(ring, 0x00000000);
      }
   }
}

void
fd5_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd5_emit *emit)
{
   int32_t i, j;
   const struct fd_vertex_state *vtx = emit->vtx;
   const struct ir3_shader_variant *vp = fd5_emit_get_vp(emit);

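   /* i walks the VS inputs, j counts the VFD fetch/decode slots actually
    * used (sysvals don't consume a fetcher):
    */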
   for (i = 0, j = 0; i <= vp->inputs_count; i++) {
      if (vp->inputs[i].sysval)
         continue;
      if (vp->inputs[i].compmask) {
         struct pipe_vertex_element *elem = &vtx->vtx->pipe[i];
         const struct pipe_vertex_buffer *vb =
            &vtx->vertexbuf.vb[elem->vertex_buffer_index];
         struct fd_resource *rsc = fd_resource(vb->buffer.resource);
         enum pipe_format pfmt = elem->src_format;
         enum a5xx_vtx_fmt fmt = fd5_pipe2vtx(pfmt);
         bool isint = util_format_is_pure_integer(pfmt);
         uint32_t off = vb->buffer_offset + elem->src_offset;
         uint32_t size = fd_bo_size(rsc->bo) - off;
         debug_assert(fmt != VFMT5_NONE);

#ifdef DEBUG
         /* see
          * dEQP-GLES31.stress.vertex_attribute_binding.buffer_bounds.bind_vertex_buffer_offset_near_wrap_10
          */
         if (off > fd_bo_size(rsc->bo))
            continue;
#endif

         OUT_PKT4(ring, REG_A5XX_VFD_FETCH(j), 4);
         OUT_RELOC(ring, rsc->bo, off, 0, 0);
         OUT_RING(ring, size);       /* VFD_FETCH[j].SIZE */
         OUT_RING(ring, vb->stride); /* VFD_FETCH[j].STRIDE */

         OUT_PKT4(ring, REG_A5XX_VFD_DECODE(j), 2);
         OUT_RING(
            ring,
            A5XX_VFD_DECODE_INSTR_IDX(j) | A5XX_VFD_DECODE_INSTR_FORMAT(fmt) |
               COND(elem->instance_divisor, A5XX_VFD_DECODE_INSTR_INSTANCED) |
               A5XX_VFD_DECODE_INSTR_SWAP(fd5_pipe2swap(pfmt)) |
               A5XX_VFD_DECODE_INSTR_UNK30 |
               COND(!isint, A5XX_VFD_DECODE_INSTR_FLOAT));
         OUT_RING(
            ring,
            MAX2(1, elem->instance_divisor)); /* VFD_DECODE[j].STEP_RATE */

         OUT_PKT4(ring, REG_A5XX_VFD_DEST_CNTL(j), 1);
         OUT_RING(ring,
                  A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK(vp->inputs[i].compmask) |
                     A5XX_VFD_DEST_CNTL_INSTR_REGID(vp->inputs[i].regid));

         j++;
      }
   }

   OUT_PKT4(ring, REG_A5XX_VFD_CONTROL_0, 1);
   OUT_RING(ring, A5XX_VFD_CONTROL_0_VTXCNT(j));
}

void
fd5_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
               struct fd5_emit *emit)
{
   struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
   const struct ir3_shader_variant *vp = fd5_emit_get_vp(emit);
   const struct ir3_shader_variant *fp = fd5_emit_get_fp(emit);
   const enum fd_dirty_3d_state dirty = emit->dirty;
   bool needs_border = false;

   emit_marker5(ring, 5);

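   /* During the binning pass we skip state that only affects fragment
    * shading (note the !binning_pass checks below), since binning only
    * needs vertex positions:
    */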
   if ((dirty & FD_DIRTY_FRAMEBUFFER) && !emit->binning_pass) {
      unsigned char mrt_comp[A5XX_MAX_RENDER_TARGETS] = {0};

      for (unsigned i = 0; i < A5XX_MAX_RENDER_TARGETS; i++) {
         mrt_comp[i] = ((i < pfb->nr_cbufs) && pfb->cbufs[i]) ? 0xf : 0;
      }

      OUT_PKT4(ring, REG_A5XX_RB_RENDER_COMPONENTS, 1);
      OUT_RING(ring, A5XX_RB_RENDER_COMPONENTS_RT0(mrt_comp[0]) |
                        A5XX_RB_RENDER_COMPONENTS_RT1(mrt_comp[1]) |
                        A5XX_RB_RENDER_COMPONENTS_RT2(mrt_comp[2]) |
                        A5XX_RB_RENDER_COMPONENTS_RT3(mrt_comp[3]) |
                        A5XX_RB_RENDER_COMPONENTS_RT4(mrt_comp[4]) |
                        A5XX_RB_RENDER_COMPONENTS_RT5(mrt_comp[5]) |
                        A5XX_RB_RENDER_COMPONENTS_RT6(mrt_comp[6]) |
                        A5XX_RB_RENDER_COMPONENTS_RT7(mrt_comp[7]));
   }

   if (dirty & (FD_DIRTY_ZSA | FD_DIRTY_FRAMEBUFFER)) {
      struct fd5_zsa_stateobj *zsa = fd5_zsa_stateobj(ctx->zsa);
      uint32_t rb_alpha_control = zsa->rb_alpha_control;

      if (util_format_is_pure_integer(pipe_surface_format(pfb->cbufs[0])))
         rb_alpha_control &= ~A5XX_RB_ALPHA_CONTROL_ALPHA_TEST;

      OUT_PKT4(ring, REG_A5XX_RB_ALPHA_CONTROL, 1);
      OUT_RING(ring, rb_alpha_control);

      OUT_PKT4(ring, REG_A5XX_RB_STENCIL_CONTROL, 1);
      OUT_RING(ring, zsa->rb_stencil_control);
   }

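   /* LRZ (low-resolution Z) write is only enabled for the binning pass,
    * and only if neither the current blend nor zsa state rules out a
    * valid LRZ buffer:
    */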
   if (dirty & (FD_DIRTY_ZSA | FD_DIRTY_BLEND | FD_DIRTY_PROG)) {
      struct fd5_blend_stateobj *blend = fd5_blend_stateobj(ctx->blend);
      struct fd5_zsa_stateobj *zsa = fd5_zsa_stateobj(ctx->zsa);

      if (pfb->zsbuf) {
         struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);
         uint32_t gras_lrz_cntl = zsa->gras_lrz_cntl;

         if (emit->no_lrz_write || !rsc->lrz || !rsc->lrz_valid)
            gras_lrz_cntl = 0;
         else if (emit->binning_pass && blend->lrz_write && zsa->lrz_write)
            gras_lrz_cntl |= A5XX_GRAS_LRZ_CNTL_LRZ_WRITE;

         OUT_PKT4(ring, REG_A5XX_GRAS_LRZ_CNTL, 1);
         OUT_RING(ring, gras_lrz_cntl);
      }
   }

   if (dirty & (FD_DIRTY_ZSA | FD_DIRTY_STENCIL_REF)) {
      struct fd5_zsa_stateobj *zsa = fd5_zsa_stateobj(ctx->zsa);
      struct pipe_stencil_ref *sr = &ctx->stencil_ref;

      OUT_PKT4(ring, REG_A5XX_RB_STENCILREFMASK, 2);
      OUT_RING(ring, zsa->rb_stencilrefmask |
                        A5XX_RB_STENCILREFMASK_STENCILREF(sr->ref_value[0]));
      OUT_RING(ring, zsa->rb_stencilrefmask_bf |
                        A5XX_RB_STENCILREFMASK_BF_STENCILREF(sr->ref_value[1]));
   }

   if (dirty & (FD_DIRTY_ZSA | FD_DIRTY_RASTERIZER | FD_DIRTY_PROG)) {
      struct fd5_zsa_stateobj *zsa = fd5_zsa_stateobj(ctx->zsa);
      bool fragz = fp->no_earlyz || fp->has_kill || zsa->base.alpha_enabled ||
                   fp->writes_pos;

      OUT_PKT4(ring, REG_A5XX_RB_DEPTH_CNTL, 1);
      OUT_RING(ring, zsa->rb_depth_cntl);

      OUT_PKT4(ring, REG_A5XX_RB_DEPTH_PLANE_CNTL, 1);
      OUT_RING(ring, COND(fragz, A5XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z) |
                        COND(fragz && fp->fragcoord_compmask != 0,
                             A5XX_RB_DEPTH_PLANE_CNTL_UNK1));

      OUT_PKT4(ring, REG_A5XX_GRAS_SU_DEPTH_PLANE_CNTL, 1);
      OUT_RING(ring, COND(fragz, A5XX_GRAS_SU_DEPTH_PLANE_CNTL_FRAG_WRITES_Z) |
                        COND(fragz && fp->fragcoord_compmask != 0,
                             A5XX_GRAS_SU_DEPTH_PLANE_CNTL_UNK1));
   }

   /* NOTE: scissor enabled bit is part of rasterizer state: */
   if (dirty & (FD_DIRTY_SCISSOR | FD_DIRTY_RASTERIZER)) {
      struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);

      OUT_PKT4(ring, REG_A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0, 2);
      OUT_RING(ring, A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(scissor->minx) |
                        A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(scissor->miny));
      OUT_RING(ring, A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(scissor->maxx - 1) |
                        A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(scissor->maxy - 1));

      OUT_PKT4(ring, REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0, 2);
      OUT_RING(ring, A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(scissor->minx) |
                        A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(scissor->miny));
      OUT_RING(ring,
               A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(scissor->maxx - 1) |
                  A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(scissor->maxy - 1));

      ctx->batch->max_scissor.minx =
         MIN2(ctx->batch->max_scissor.minx, scissor->minx);
      ctx->batch->max_scissor.miny =
         MIN2(ctx->batch->max_scissor.miny, scissor->miny);
      ctx->batch->max_scissor.maxx =
         MAX2(ctx->batch->max_scissor.maxx, scissor->maxx);
      ctx->batch->max_scissor.maxy =
         MAX2(ctx->batch->max_scissor.maxy, scissor->maxy);
   }

   if (dirty & FD_DIRTY_VIEWPORT) {
      fd_wfi(ctx->batch, ring);
      OUT_PKT4(ring, REG_A5XX_GRAS_CL_VPORT_XOFFSET_0, 6);
      OUT_RING(ring, A5XX_GRAS_CL_VPORT_XOFFSET_0(ctx->viewport.translate[0]));
      OUT_RING(ring, A5XX_GRAS_CL_VPORT_XSCALE_0(ctx->viewport.scale[0]));
      OUT_RING(ring, A5XX_GRAS_CL_VPORT_YOFFSET_0(ctx->viewport.translate[1]));
      OUT_RING(ring, A5XX_GRAS_CL_VPORT_YSCALE_0(ctx->viewport.scale[1]));
      OUT_RING(ring, A5XX_GRAS_CL_VPORT_ZOFFSET_0(ctx->viewport.translate[2]));
      OUT_RING(ring, A5XX_GRAS_CL_VPORT_ZSCALE_0(ctx->viewport.scale[2]));
   }

   if (dirty & FD_DIRTY_PROG)
      fd5_program_emit(ctx, ring, emit);

   if (dirty & FD_DIRTY_RASTERIZER) {
      struct fd5_rasterizer_stateobj *rasterizer =
         fd5_rasterizer_stateobj(ctx->rasterizer);

      OUT_PKT4(ring, REG_A5XX_GRAS_SU_CNTL, 1);
      OUT_RING(ring, rasterizer->gras_su_cntl |
                        A5XX_GRAS_SU_CNTL_LINE_MODE(pfb->samples > 1 ?
                                                    RECTANGULAR : BRESENHAM));

      OUT_PKT4(ring, REG_A5XX_GRAS_SU_POINT_MINMAX, 2);
      OUT_RING(ring, rasterizer->gras_su_point_minmax);
      OUT_RING(ring, rasterizer->gras_su_point_size);

      OUT_PKT4(ring, REG_A5XX_GRAS_SU_POLY_OFFSET_SCALE, 3);
      OUT_RING(ring, rasterizer->gras_su_poly_offset_scale);
      OUT_RING(ring, rasterizer->gras_su_poly_offset_offset);
      OUT_RING(ring, rasterizer->gras_su_poly_offset_clamp);

      OUT_PKT4(ring, REG_A5XX_PC_RASTER_CNTL, 1);
      OUT_RING(ring, rasterizer->pc_raster_cntl);

      OUT_PKT4(ring, REG_A5XX_GRAS_CL_CNTL, 1);
      OUT_RING(ring, rasterizer->gras_cl_clip_cntl);
   }

   /* note: must come after program emit.. because there is some overlap
    * in registers, ex. PC_PRIMITIVE_CNTL and we rely on some cached
    * values from fd5_program_emit() to avoid having to re-emit the prog
    * every time rast state changes.
    *
    * Since the primitive restart state is not part of a tracked object, we
    * re-emit this register every time.
    */
   if (emit->info && ctx->rasterizer) {
      struct fd5_rasterizer_stateobj *rasterizer =
         fd5_rasterizer_stateobj(ctx->rasterizer);
      unsigned max_loc = fd5_context(ctx)->max_loc;

      OUT_PKT4(ring, REG_A5XX_PC_PRIMITIVE_CNTL, 1);
      OUT_RING(ring,
               rasterizer->pc_primitive_cntl |
                  A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC(max_loc) |
                  COND(emit->info->primitive_restart && emit->info->index_size,
                       A5XX_PC_PRIMITIVE_CNTL_PRIMITIVE_RESTART));
   }

   if (dirty & (FD_DIRTY_FRAMEBUFFER | FD_DIRTY_RASTERIZER | FD_DIRTY_PROG)) {
      uint32_t posz_regid = ir3_find_output_regid(fp, FRAG_RESULT_DEPTH);
      unsigned nr = pfb->nr_cbufs;

      if (emit->binning_pass)
         nr = 0;
      else if (ctx->rasterizer->rasterizer_discard)
         nr = 0;

      OUT_PKT4(ring, REG_A5XX_RB_FS_OUTPUT_CNTL, 1);
      OUT_RING(ring,
               A5XX_RB_FS_OUTPUT_CNTL_MRT(nr) |
                  COND(fp->writes_pos, A5XX_RB_FS_OUTPUT_CNTL_FRAG_WRITES_Z));

      OUT_PKT4(ring, REG_A5XX_SP_FS_OUTPUT_CNTL, 1);
      OUT_RING(ring, A5XX_SP_FS_OUTPUT_CNTL_MRT(nr) |
                        A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID(posz_regid) |
                        A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID(regid(63, 0)));
   }

   ir3_emit_vs_consts(vp, ring, ctx, emit->info, emit->indirect, emit->draw);
   if (!emit->binning_pass)
      ir3_emit_fs_consts(fp, ring, ctx);

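   /* Streamout buffer setup: VPC_SO_BUFFER_OFFSET is either written
    * directly after a streamout-target reset, or read back from the
    * offset BO that the hw updated after the previous draw:
    */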
   struct ir3_stream_output_info *info = &vp->shader->stream_output;
   if (info->num_outputs) {
      struct fd_streamout_stateobj *so = &ctx->streamout;

      for (unsigned i = 0; i < so->num_targets; i++) {
         struct fd_stream_output_target *target =
            fd_stream_output_target(so->targets[i]);

         if (!target)
            continue;

         OUT_PKT4(ring, REG_A5XX_VPC_SO_BUFFER_BASE_LO(i), 3);
         /* VPC_SO[i].BUFFER_BASE_LO: */
         OUT_RELOC(ring, fd_resource(target->base.buffer)->bo, 0, 0, 0);
         OUT_RING(ring, target->base.buffer_size + target->base.buffer_offset);

         struct fd_bo *offset_bo = fd_resource(target->offset_buf)->bo;

         if (so->reset & (1 << i)) {
            assert(so->offsets[i] == 0);

            OUT_PKT7(ring, CP_MEM_WRITE, 3);
            OUT_RELOC(ring, offset_bo, 0, 0, 0);
            OUT_RING(ring, target->base.buffer_offset);

            OUT_PKT4(ring, REG_A5XX_VPC_SO_BUFFER_OFFSET(i), 1);
            OUT_RING(ring, target->base.buffer_offset);
         } else {
            OUT_PKT7(ring, CP_MEM_TO_REG, 3);
            OUT_RING(ring,
                     CP_MEM_TO_REG_0_REG(REG_A5XX_VPC_SO_BUFFER_OFFSET(i)) |
                        CP_MEM_TO_REG_0_SHIFT_BY_2 | CP_MEM_TO_REG_0_UNK31 |
                        CP_MEM_TO_REG_0_CNT(0));
            OUT_RELOC(ring, offset_bo, 0, 0, 0);
         }

         // After a draw HW would write the new offset to offset_bo
         OUT_PKT4(ring, REG_A5XX_VPC_SO_FLUSH_BASE_LO(i), 2);
         OUT_RELOC(ring, offset_bo, 0, 0, 0);

         so->reset &= ~(1 << i);

         emit->streamout_mask |= (1 << i);
      }
   }

   if (!emit->streamout_mask && info->num_outputs) {
      OUT_PKT7(ring, CP_CONTEXT_REG_BUNCH, 4);
      OUT_RING(ring, REG_A5XX_VPC_SO_CNTL);
      OUT_RING(ring, 0);
      OUT_RING(ring, REG_A5XX_VPC_SO_BUF_CNTL);
      OUT_RING(ring, 0);
   } else if (emit->streamout_mask && !(dirty & FD_DIRTY_PROG)) {
      /* reemit the program (if we haven't already) to re-enable streamout.
       * We really should switch to setting up program state at compile time
       * so we can separate the SO state from the rest, and not recompute
       * all the time.
       */
      fd5_program_emit(ctx, ring, emit);
   }

   if (dirty & FD_DIRTY_BLEND) {
      struct fd5_blend_stateobj *blend = fd5_blend_stateobj(ctx->blend);
      uint32_t i;

      for (i = 0; i < A5XX_MAX_RENDER_TARGETS; i++) {
         enum pipe_format format = pipe_surface_format(pfb->cbufs[i]);
         bool is_int = util_format_is_pure_integer(format);
         bool has_alpha = util_format_has_alpha(format);
         uint32_t control = blend->rb_mrt[i].control;

         if (is_int) {
            control &= A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
            control |= A5XX_RB_MRT_CONTROL_ROP_CODE(ROP_COPY);
         }

         if (!has_alpha) {
            control &= ~A5XX_RB_MRT_CONTROL_BLEND2;
         }

         OUT_PKT4(ring, REG_A5XX_RB_MRT_CONTROL(i), 1);
         OUT_RING(ring, control);

         OUT_PKT4(ring, REG_A5XX_RB_MRT_BLEND_CONTROL(i), 1);
         OUT_RING(ring, blend->rb_mrt[i].blend_control);
      }

      OUT_PKT4(ring, REG_A5XX_SP_BLEND_CNTL, 1);
      OUT_RING(ring, blend->sp_blend_cntl);
   }

   if (dirty & (FD_DIRTY_BLEND | FD_DIRTY_SAMPLE_MASK)) {
      struct fd5_blend_stateobj *blend = fd5_blend_stateobj(ctx->blend);

      OUT_PKT4(ring, REG_A5XX_RB_BLEND_CNTL, 1);
      OUT_RING(ring, blend->rb_blend_cntl |
                        A5XX_RB_BLEND_CNTL_SAMPLE_MASK(ctx->sample_mask));
   }

   if (dirty & FD_DIRTY_BLEND_COLOR) {
      struct pipe_blend_color *bcolor = &ctx->blend_color;

      OUT_PKT4(ring, REG_A5XX_RB_BLEND_RED, 8);
      OUT_RING(ring, A5XX_RB_BLEND_RED_FLOAT(bcolor->color[0]) |
                        A5XX_RB_BLEND_RED_UINT(bcolor->color[0] * 0xff) |
                        A5XX_RB_BLEND_RED_SINT(bcolor->color[0] * 0x7f));
      OUT_RING(ring, A5XX_RB_BLEND_RED_F32(bcolor->color[0]));
      OUT_RING(ring, A5XX_RB_BLEND_GREEN_FLOAT(bcolor->color[1]) |
                        A5XX_RB_BLEND_GREEN_UINT(bcolor->color[1] * 0xff) |
                        A5XX_RB_BLEND_GREEN_SINT(bcolor->color[1] * 0x7f));
      OUT_RING(ring, A5XX_RB_BLEND_GREEN_F32(bcolor->color[1]));
      OUT_RING(ring, A5XX_RB_BLEND_BLUE_FLOAT(bcolor->color[2]) |
                        A5XX_RB_BLEND_BLUE_UINT(bcolor->color[2] * 0xff) |
                        A5XX_RB_BLEND_BLUE_SINT(bcolor->color[2] * 0x7f));
      OUT_RING(ring, A5XX_RB_BLEND_BLUE_F32(bcolor->color[2]));
      OUT_RING(ring, A5XX_RB_BLEND_ALPHA_FLOAT(bcolor->color[3]) |
                        A5XX_RB_BLEND_ALPHA_UINT(bcolor->color[3] * 0xff) |
                        A5XX_RB_BLEND_ALPHA_SINT(bcolor->color[3] * 0x7f));
      OUT_RING(ring, A5XX_RB_BLEND_ALPHA_F32(bcolor->color[3]));
   }

   if (ctx->dirty_shader[PIPE_SHADER_VERTEX] & FD_DIRTY_SHADER_TEX) {
      needs_border |=
         emit_textures(ctx, ring, SB4_VS_TEX, &ctx->tex[PIPE_SHADER_VERTEX]);
      OUT_PKT4(ring, REG_A5XX_TPL1_VS_TEX_COUNT, 1);
      OUT_RING(ring, ctx->tex[PIPE_SHADER_VERTEX].num_textures);
   }

   if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_TEX) {
      needs_border |=
         emit_textures(ctx, ring, SB4_FS_TEX, &ctx->tex[PIPE_SHADER_FRAGMENT]);
   }

   OUT_PKT4(ring, REG_A5XX_TPL1_FS_TEX_COUNT, 1);
   OUT_RING(ring, ctx->shaderimg[PIPE_SHADER_FRAGMENT].enabled_mask
                     ? ~0
                     : ctx->tex[PIPE_SHADER_FRAGMENT].num_textures);

   OUT_PKT4(ring, REG_A5XX_TPL1_CS_TEX_COUNT, 1);
   OUT_RING(ring, 0);

   if (needs_border)
      emit_border_color(ctx, ring);

   if (!emit->binning_pass) {
      if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_SSBO)
         emit_ssbos(ctx, ring, SB4_SSBO, &ctx->shaderbuf[PIPE_SHADER_FRAGMENT],
                    fp);

      if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_IMAGE)
         fd5_emit_images(ctx, ring, PIPE_SHADER_FRAGMENT, fp);
   }
}

void
fd5_emit_cs_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
                  struct ir3_shader_variant *cp)
{
   enum fd_dirty_shader_state dirty = ctx->dirty_shader[PIPE_SHADER_COMPUTE];

   if (dirty & FD_DIRTY_SHADER_TEX) {
      bool needs_border = false;
      needs_border |=
         emit_textures(ctx, ring, SB4_CS_TEX, &ctx->tex[PIPE_SHADER_COMPUTE]);

      if (needs_border)
         emit_border_color(ctx, ring);

      OUT_PKT4(ring, REG_A5XX_TPL1_VS_TEX_COUNT, 1);
      OUT_RING(ring, 0);

      OUT_PKT4(ring, REG_A5XX_TPL1_HS_TEX_COUNT, 1);
      OUT_RING(ring, 0);

      OUT_PKT4(ring, REG_A5XX_TPL1_DS_TEX_COUNT, 1);
      OUT_RING(ring, 0);

      OUT_PKT4(ring, REG_A5XX_TPL1_GS_TEX_COUNT, 1);
      OUT_RING(ring, 0);

      OUT_PKT4(ring, REG_A5XX_TPL1_FS_TEX_COUNT, 1);
      OUT_RING(ring, 0);
   }

   OUT_PKT4(ring, REG_A5XX_TPL1_CS_TEX_COUNT, 1);
   OUT_RING(ring, ctx->shaderimg[PIPE_SHADER_COMPUTE].enabled_mask
                     ? ~0
                     : ctx->tex[PIPE_SHADER_COMPUTE].num_textures);

   if (dirty & FD_DIRTY_SHADER_SSBO)
      emit_ssbos(ctx, ring, SB4_CS_SSBO, &ctx->shaderbuf[PIPE_SHADER_COMPUTE],
                 cp);

   if (dirty & FD_DIRTY_SHADER_IMAGE)
      fd5_emit_images(ctx, ring, PIPE_SHADER_COMPUTE, cp);
}

/* emit setup at begin of new cmdstream buffer (don't rely on previous
 * state, there could have been a context switch between ioctls):
 */
void
fd5_emit_restore(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
   struct fd_context *ctx = batch->ctx;

   fd5_set_render_mode(ctx, ring, BYPASS);
   fd5_cache_flush(batch, ring);

   OUT_PKT4(ring, REG_A5XX_HLSQ_UPDATE_CNTL, 1);
   OUT_RING(ring, 0xfffff);

   /*
   t7              opcode: CP_PERFCOUNTER_ACTION (50) (4 dwords)
   0000000500024048:       70d08003 00000000 001c5000 00000005
   t7              opcode: CP_PERFCOUNTER_ACTION (50) (4 dwords)
   0000000500024058:       70d08003 00000010 001c7000 00000005

   t7              opcode: CP_WAIT_FOR_IDLE (26) (1 dwords)
   0000000500024068:       70268000
   */

   OUT_PKT4(ring, REG_A5XX_PC_RESTART_INDEX, 1);
   OUT_RING(ring, 0xffffffff);

   OUT_PKT4(ring, REG_A5XX_PC_RASTER_CNTL, 1);
   OUT_RING(ring, 0x00000012);

   OUT_PKT4(ring, REG_A5XX_GRAS_SU_POINT_MINMAX, 2);
   OUT_RING(ring, A5XX_GRAS_SU_POINT_MINMAX_MIN(1.0) |
                     A5XX_GRAS_SU_POINT_MINMAX_MAX(4092.0));
   OUT_RING(ring, A5XX_GRAS_SU_POINT_SIZE(0.5));

   OUT_PKT4(ring, REG_A5XX_GRAS_SU_CONSERVATIVE_RAS_CNTL, 1);
   OUT_RING(ring, 0x00000000); /* GRAS_SU_CONSERVATIVE_RAS_CNTL */

   OUT_PKT4(ring, REG_A5XX_GRAS_SC_SCREEN_SCISSOR_CNTL, 1);
   OUT_RING(ring, 0x00000000); /* GRAS_SC_SCREEN_SCISSOR_CNTL */

   OUT_PKT4(ring, REG_A5XX_SP_VS_CONFIG_MAX_CONST, 1);
   OUT_RING(ring, 0); /* SP_VS_CONFIG_MAX_CONST */

   OUT_PKT4(ring, REG_A5XX_SP_FS_CONFIG_MAX_CONST, 1);
   OUT_RING(ring, 0); /* SP_FS_CONFIG_MAX_CONST */

   OUT_PKT4(ring, REG_A5XX_UNKNOWN_E292, 2);
   OUT_RING(ring, 0x00000000); /* UNKNOWN_E292 */
   OUT_RING(ring, 0x00000000); /* UNKNOWN_E293 */

   OUT_PKT4(ring, REG_A5XX_RB_MODE_CNTL, 1);
   OUT_RING(ring, 0x00000044); /* RB_MODE_CNTL */

   OUT_PKT4(ring, REG_A5XX_RB_DBG_ECO_CNTL, 1);
   OUT_RING(ring, 0x00100000); /* RB_DBG_ECO_CNTL */

   OUT_PKT4(ring, REG_A5XX_VFD_MODE_CNTL, 1);
   OUT_RING(ring, 0x00000000); /* VFD_MODE_CNTL */

   OUT_PKT4(ring, REG_A5XX_PC_MODE_CNTL, 1);
   OUT_RING(ring, 0x0000001f); /* PC_MODE_CNTL */

   OUT_PKT4(ring, REG_A5XX_SP_MODE_CNTL, 1);
   OUT_RING(ring, 0x0000001e); /* SP_MODE_CNTL */

   if (ctx->screen->gpu_id == 540) {
      OUT_PKT4(ring, REG_A5XX_SP_DBG_ECO_CNTL, 1);
      OUT_RING(ring, 0x800); /* SP_DBG_ECO_CNTL */

      OUT_PKT4(ring, REG_A5XX_HLSQ_DBG_ECO_CNTL, 1);
      OUT_RING(ring, 0x0);

      OUT_PKT4(ring, REG_A5XX_VPC_DBG_ECO_CNTL, 1);
      OUT_RING(ring, 0x800400);
   } else {
      OUT_PKT4(ring, REG_A5XX_SP_DBG_ECO_CNTL, 1);
      OUT_RING(ring, 0x40000800); /* SP_DBG_ECO_CNTL */
   }

   OUT_PKT4(ring, REG_A5XX_TPL1_MODE_CNTL, 1);
   OUT_RING(ring, 0x00000544); /* TPL1_MODE_CNTL */

   OUT_PKT4(ring, REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_0, 2);
   OUT_RING(ring, 0x00000080); /* HLSQ_TIMEOUT_THRESHOLD_0 */
   OUT_RING(ring, 0x00000000); /* HLSQ_TIMEOUT_THRESHOLD_1 */

   OUT_PKT4(ring, REG_A5XX_VPC_DBG_ECO_CNTL, 1);
   OUT_RING(ring, 0x00000400); /* VPC_DBG_ECO_CNTL */

   OUT_PKT4(ring, REG_A5XX_HLSQ_MODE_CNTL, 1);
   OUT_RING(ring, 0x00000001); /* HLSQ_MODE_CNTL */

   OUT_PKT4(ring, REG_A5XX_VPC_MODE_CNTL, 1);
   OUT_RING(ring, 0x00000000); /* VPC_MODE_CNTL */

   /* we don't use this yet.. probably best to disable.. */
   OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
   OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
                     CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                     CP_SET_DRAW_STATE__0_GROUP_ID(0));
   OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(0));
   OUT_RING(ring, CP_SET_DRAW_STATE__2_ADDR_HI(0));

   OUT_PKT4(ring, REG_A5XX_GRAS_SU_CONSERVATIVE_RAS_CNTL, 1);
   OUT_RING(ring, 0x00000000); /* GRAS_SU_CONSERVATIVE_RAS_CNTL */

   OUT_PKT4(ring, REG_A5XX_GRAS_SC_BIN_CNTL, 1);
   OUT_RING(ring, 0x00000000); /* GRAS_SC_BIN_CNTL */

   OUT_PKT4(ring, REG_A5XX_GRAS_SC_BIN_CNTL, 1);
   OUT_RING(ring, 0x00000000); /* GRAS_SC_BIN_CNTL */

   OUT_PKT4(ring, REG_A5XX_VPC_FS_PRIMITIVEID_CNTL, 1);
   OUT_RING(ring, 0x000000ff); /* VPC_FS_PRIMITIVEID_CNTL */

   OUT_PKT4(ring, REG_A5XX_VPC_SO_OVERRIDE, 1);
   OUT_RING(ring, A5XX_VPC_SO_OVERRIDE_SO_DISABLE);

   OUT_PKT4(ring, REG_A5XX_VPC_SO_BUFFER_BASE_LO(0), 3);
   OUT_RING(ring, 0x00000000); /* VPC_SO_BUFFER_BASE_LO_0 */
   OUT_RING(ring, 0x00000000); /* VPC_SO_BUFFER_BASE_HI_0 */
   OUT_RING(ring, 0x00000000); /* VPC_SO_BUFFER_SIZE_0 */

   OUT_PKT4(ring, REG_A5XX_VPC_SO_FLUSH_BASE_LO(0), 2);
   OUT_RING(ring, 0x00000000); /* VPC_SO_FLUSH_BASE_LO_0 */
   OUT_RING(ring, 0x00000000); /* VPC_SO_FLUSH_BASE_HI_0 */

   OUT_PKT4(ring, REG_A5XX_PC_GS_PARAM, 1);
   OUT_RING(ring, 0x00000000); /* PC_GS_PARAM */

   OUT_PKT4(ring, REG_A5XX_PC_HS_PARAM, 1);
   OUT_RING(ring, 0x00000000); /* PC_HS_PARAM */

   OUT_PKT4(ring, REG_A5XX_TPL1_TP_FS_ROTATION_CNTL, 1);
   OUT_RING(ring, 0x00000000); /* TPL1_TP_FS_ROTATION_CNTL */

   OUT_PKT4(ring, REG_A5XX_UNKNOWN_E004, 1);
   OUT_RING(ring, 0x00000000); /* UNKNOWN_E004 */

   OUT_PKT4(ring, REG_A5XX_GRAS_SU_LAYERED, 1);
   OUT_RING(ring, 0x00000000); /* GRAS_SU_LAYERED */

   OUT_PKT4(ring, REG_A5XX_VPC_SO_BUF_CNTL, 1);
   OUT_RING(ring, 0x00000000); /* VPC_SO_BUF_CNTL */

   OUT_PKT4(ring, REG_A5XX_VPC_SO_BUFFER_OFFSET(0), 1);
   OUT_RING(ring, 0x00000000); /* UNKNOWN_E2AB */

   OUT_PKT4(ring, REG_A5XX_PC_GS_LAYERED, 1);
   OUT_RING(ring, 0x00000000); /* PC_GS_LAYERED */

   OUT_PKT4(ring, REG_A5XX_UNKNOWN_E5AB, 1);
   OUT_RING(ring, 0x00000000); /* UNKNOWN_E5AB */

   OUT_PKT4(ring, REG_A5XX_UNKNOWN_E5C2, 1);
   OUT_RING(ring, 0x00000000); /* UNKNOWN_E5C2 */

   OUT_PKT4(ring, REG_A5XX_VPC_SO_BUFFER_BASE_LO(1), 3);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   OUT_PKT4(ring, REG_A5XX_VPC_SO_BUFFER_OFFSET(1), 6);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   OUT_PKT4(ring, REG_A5XX_VPC_SO_BUFFER_OFFSET(2), 6);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   OUT_PKT4(ring, REG_A5XX_VPC_SO_BUFFER_OFFSET(3), 3);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   OUT_PKT4(ring, REG_A5XX_UNKNOWN_E5DB, 1);
   OUT_RING(ring, 0x00000000);

   OUT_PKT4(ring, REG_A5XX_SP_HS_CTRL_REG0, 1);
   OUT_RING(ring, 0x00000000);

   OUT_PKT4(ring, REG_A5XX_SP_GS_CTRL_REG0, 1);
   OUT_RING(ring, 0x00000000);

   OUT_PKT4(ring, REG_A5XX_TPL1_VS_TEX_COUNT, 4);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   OUT_PKT4(ring, REG_A5XX_TPL1_FS_TEX_COUNT, 2);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   OUT_PKT4(ring, REG_A5XX_UNKNOWN_E7C0, 3);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   OUT_PKT4(ring, REG_A5XX_UNKNOWN_E7C5, 3);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   OUT_PKT4(ring, REG_A5XX_UNKNOWN_E7CA, 3);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   OUT_PKT4(ring, REG_A5XX_UNKNOWN_E7CF, 3);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   OUT_PKT4(ring, REG_A5XX_UNKNOWN_E7D4, 3);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   OUT_PKT4(ring, REG_A5XX_UNKNOWN_E7D9, 3);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000000);

   OUT_PKT4(ring, REG_A5XX_RB_CLEAR_CNTL, 1);
   OUT_RING(ring, 0x00000000);
}

static void
fd5_mem_to_mem(struct fd_ringbuffer *ring, struct pipe_resource *dst,
               unsigned dst_off, struct pipe_resource *src, unsigned src_off,
               unsigned sizedwords)
{
   struct fd_bo *src_bo = fd_resource(src)->bo;
   struct fd_bo *dst_bo = fd_resource(dst)->bo;
   unsigned i;

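   /* Each CP_MEM_TO_MEM packet copies a single dword; offsets are in
    * bytes, hence advancing by 4 each iteration:
    */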
   for (i = 0; i < sizedwords; i++) {
      OUT_PKT7(ring, CP_MEM_TO_MEM, 5);
      OUT_RING(ring, 0x00000000);
      OUT_RELOC(ring, dst_bo, dst_off, 0, 0);
      OUT_RELOC(ring, src_bo, src_off, 0, 0);

      dst_off += 4;
      src_off += 4;
   }
}

void
fd5_emit_init_screen(struct pipe_screen *pscreen)
{
   struct fd_screen *screen = fd_screen(pscreen);
   screen->emit_ib = fd5_emit_ib;
   screen->mem_to_mem = fd5_mem_to_mem;
}

void
fd5_emit_init(struct pipe_context *pctx)
{
}