/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/format/u_format.h"
#include "util/u_helpers.h"
#include "util/u_memory.h"
#include "util/u_string.h"
#include "util/u_viewport.h"

#include "freedreno_query_hw.h"
#include "freedreno_resource.h"

#include "fd4_blend.h"
#include "fd4_context.h"
#include "fd4_emit.h"
#include "fd4_format.h"
#include "fd4_image.h"
#include "fd4_program.h"
#include "fd4_rasterizer.h"
#include "fd4_texture.h"
#include "fd4_zsa.h"

#define emit_const_user fd4_emit_const_user
#define emit_const_bo fd4_emit_const_bo
#include "ir3_const.h"

/* regid:          base const register
 * prsc or dwords: buffer containing constant values
 * sizedwords:     size of const value buffer
 */
static void
fd4_emit_const_user(struct fd_ringbuffer *ring,
                    const struct ir3_shader_variant *v, uint32_t regid,
                    uint32_t sizedwords, const uint32_t *dwords)
{
   emit_const_asserts(ring, v, regid, sizedwords);

   OUT_PKT3(ring, CP_LOAD_STATE4, 2 + sizedwords);
   OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(regid / 4) |
                     CP_LOAD_STATE4_0_STATE_SRC(SS4_DIRECT) |
                     CP_LOAD_STATE4_0_STATE_BLOCK(fd4_stage2shadersb(v->type)) |
                     CP_LOAD_STATE4_0_NUM_UNIT(sizedwords / 4));
   OUT_RING(ring, CP_LOAD_STATE4_1_EXT_SRC_ADDR(0) |
                     CP_LOAD_STATE4_1_STATE_TYPE(ST4_CONSTANTS));
   for (int i = 0; i < sizedwords; i++)
      OUT_RING(ring, dwords[i]);
}

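/* emit an indirect const load, ie. pointing at a bo rather than
 * inline dwords:
 */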
static void
fd4_emit_const_bo(struct fd_ringbuffer *ring,
                  const struct ir3_shader_variant *v, uint32_t regid,
                  uint32_t offset, uint32_t sizedwords, struct fd_bo *bo)
{
   uint32_t dst_off = regid / 4;
   assert(dst_off % 4 == 0);
   uint32_t num_unit = sizedwords / 4;
   assert(num_unit % 4 == 0);

   emit_const_asserts(ring, v, regid, sizedwords);

   OUT_PKT3(ring, CP_LOAD_STATE4, 2);
   OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(dst_off) |
                     CP_LOAD_STATE4_0_STATE_SRC(SS4_INDIRECT) |
                     CP_LOAD_STATE4_0_STATE_BLOCK(fd4_stage2shadersb(v->type)) |
                     CP_LOAD_STATE4_0_NUM_UNIT(num_unit));
   OUT_RELOC(ring, bo, offset, CP_LOAD_STATE4_1_STATE_TYPE(ST4_CONSTANTS), 0);
}

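/* emit a table of bo pointers as consts (used by the ir3_const.h helpers
 * for eg. UBO pointers), padded out to a multiple of four dwords:
 */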
static void
fd4_emit_const_ptrs(struct fd_ringbuffer *ring, gl_shader_stage type,
                    uint32_t regid, uint32_t num, struct fd_bo **bos,
                    uint32_t *offsets)
{
   uint32_t anum = align(num, 4);
   uint32_t i;

   assert((regid % 4) == 0);

   OUT_PKT3(ring, CP_LOAD_STATE4, 2 + anum);
   OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(regid / 4) |
                     CP_LOAD_STATE4_0_STATE_SRC(SS4_DIRECT) |
                     CP_LOAD_STATE4_0_STATE_BLOCK(fd4_stage2shadersb(type)) |
                     CP_LOAD_STATE4_0_NUM_UNIT(anum / 4));
   OUT_RING(ring, CP_LOAD_STATE4_1_EXT_SRC_ADDR(0) |
                     CP_LOAD_STATE4_1_STATE_TYPE(ST4_CONSTANTS));

   for (i = 0; i < num; i++) {
      if (bos[i]) {
         OUT_RELOC(ring, bos[i], offsets[i], 0, 0);
      } else {
         OUT_RING(ring, 0xbad00000 | (i << 16));
      }
   }

   for (; i < anum; i++)
      OUT_RING(ring, 0xffffffff);
}

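/* a4xx emits cmdstream directly into the batch's rings (no hw stateobjs),
 * so for the shared ir3_const.h helpers this is always false:
 */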
static bool
is_stateobj(struct fd_ringbuffer *ring)
{
   return false;
}

static void
emit_const_ptrs(struct fd_ringbuffer *ring, const struct ir3_shader_variant *v,
                uint32_t dst_offset, uint32_t num, struct fd_bo **bos,
                uint32_t *offsets)
{
   /* TODO inline this */
   assert(dst_offset + num <= v->constlen * 4);
   fd4_emit_const_ptrs(ring, v->type, dst_offset, num, bos, offsets);
}

void
fd4_emit_cs_consts(const struct ir3_shader_variant *v,
                   struct fd_ringbuffer *ring, struct fd_context *ctx,
                   const struct pipe_grid_info *info)
{
   ir3_emit_cs_consts(v, ring, ctx, info);
}

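/* emit sampler and texture state for a shader stage via CP_LOAD_STATE4,
 * plus border colors if any bound sampler needs them:
 */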
static void
emit_textures(struct fd_context *ctx, struct fd_ringbuffer *ring,
              enum a4xx_state_block sb, struct fd_texture_stateobj *tex,
              const struct ir3_shader_variant *v)
{
   static const uint32_t bcolor_reg[] = {
      [SB4_VS_TEX] = REG_A4XX_TPL1_TP_VS_BORDER_COLOR_BASE_ADDR,
      [SB4_FS_TEX] = REG_A4XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR,
      [SB4_CS_TEX] = REG_A4XX_TPL1_TP_CS_BORDER_COLOR_BASE_ADDR,
   };
   struct fd4_context *fd4_ctx = fd4_context(ctx);
   bool needs_border = false;
   unsigned i;

   if (tex->num_samplers > 0 || tex->num_textures > 0) {
      int num_samplers = tex->num_samplers;

      /* We want to always make sure that there's at least one sampler if
       * there are going to be texture accesses. Gallium might not upload a
       * sampler for e.g. buffer textures.
       */
      if (num_samplers == 0)
         num_samplers++;

      /* not sure if this is an a420.0 workaround, but we seem
       * to need to emit these in pairs.. emit a final dummy
       * entry if odd # of samplers:
       */
      num_samplers = align(num_samplers, 2);

      /* output sampler state: */
      OUT_PKT3(ring, CP_LOAD_STATE4, 2 + (2 * num_samplers));
      OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(0) |
                        CP_LOAD_STATE4_0_STATE_SRC(SS4_DIRECT) |
                        CP_LOAD_STATE4_0_STATE_BLOCK(sb) |
                        CP_LOAD_STATE4_0_NUM_UNIT(num_samplers));
      OUT_RING(ring, CP_LOAD_STATE4_1_STATE_TYPE(ST4_SHADER) |
                        CP_LOAD_STATE4_1_EXT_SRC_ADDR(0));
      for (i = 0; i < tex->num_samplers; i++) {
         static const struct fd4_sampler_stateobj dummy_sampler = {};
         const struct fd4_sampler_stateobj *sampler =
            tex->samplers[i] ? fd4_sampler_stateobj(tex->samplers[i])
                             : &dummy_sampler;
         OUT_RING(ring, sampler->texsamp0);
         OUT_RING(ring, sampler->texsamp1);

         needs_border |= sampler->needs_border;
      }

      for (; i < num_samplers; i++) {
         OUT_RING(ring, 0x00000000);
         OUT_RING(ring, 0x00000000);
      }
   }

   if (tex->num_textures > 0) {
      unsigned num_textures =
         tex->num_textures + v->astc_srgb.count + v->tg4.count;

      /* emit texture state: */
      OUT_PKT3(ring, CP_LOAD_STATE4, 2 + (8 * num_textures));
      OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(0) |
                        CP_LOAD_STATE4_0_STATE_SRC(SS4_DIRECT) |
                        CP_LOAD_STATE4_0_STATE_BLOCK(sb) |
                        CP_LOAD_STATE4_0_NUM_UNIT(num_textures));
      OUT_RING(ring, CP_LOAD_STATE4_1_STATE_TYPE(ST4_CONSTANTS) |
                        CP_LOAD_STATE4_1_EXT_SRC_ADDR(0));
      for (i = 0; i < tex->num_textures; i++) {
         static const struct fd4_pipe_sampler_view dummy_view = {};
         const struct fd4_pipe_sampler_view *view =
            tex->textures[i] ? fd4_pipe_sampler_view(tex->textures[i])
                             : &dummy_view;

         OUT_RING(ring, view->texconst0);
         OUT_RING(ring, view->texconst1);
         OUT_RING(ring, view->texconst2);
         OUT_RING(ring, view->texconst3);
         if (view->base.texture) {
            struct fd_resource *rsc = fd_resource(view->base.texture);
            if (view->base.format == PIPE_FORMAT_X32_S8X24_UINT)
               rsc = rsc->stencil;
            OUT_RELOC(ring, rsc->bo, view->offset, view->texconst4, 0);
         } else {
            OUT_RING(ring, 0x00000000);
         }
         OUT_RING(ring, 0x00000000);
         OUT_RING(ring, 0x00000000);
         OUT_RING(ring, 0x00000000);
      }

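      /* duplicate texture state for ASTC+sRGB views, but with the sRGB
       * bit cleared.. afaict the shader is expected to do the sRGB
       * decode itself when sampling these:
       */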
      for (i = 0; i < v->astc_srgb.count; i++) {
         static const struct fd4_pipe_sampler_view dummy_view = {};
         const struct fd4_pipe_sampler_view *view;
         unsigned idx = v->astc_srgb.orig_idx[i];

         view = tex->textures[idx] ? fd4_pipe_sampler_view(tex->textures[idx])
                                   : &dummy_view;

         assert(view->texconst0 & A4XX_TEX_CONST_0_SRGB);

         OUT_RING(ring, view->texconst0 & ~A4XX_TEX_CONST_0_SRGB);
         OUT_RING(ring, view->texconst1);
         OUT_RING(ring, view->texconst2);
         OUT_RING(ring, view->texconst3);
         if (view->base.texture) {
            struct fd_resource *rsc = fd_resource(view->base.texture);
            OUT_RELOC(ring, rsc->bo, view->offset, view->texconst4, 0);
         } else {
            OUT_RING(ring, 0x00000000);
         }
         OUT_RING(ring, 0x00000000);
         OUT_RING(ring, 0x00000000);
         OUT_RING(ring, 0x00000000);
      }

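      /* another duplicate set of texture states, for tg4 (gather)
       * workarounds, with identity swizzle and integer formats remapped:
       */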
      for (i = 0; i < v->tg4.count; i++) {
         static const struct fd4_pipe_sampler_view dummy_view = {};
         const struct fd4_pipe_sampler_view *view;
         unsigned idx = v->tg4.orig_idx[i];

         view = tex->textures[idx] ? fd4_pipe_sampler_view(tex->textures[idx])
                                   : &dummy_view;

         unsigned texconst0 = view->texconst0 & ~(0xfff << 4);
         texconst0 |= A4XX_TEX_CONST_0_SWIZ_X(A4XX_TEX_X) |
                      A4XX_TEX_CONST_0_SWIZ_Y(A4XX_TEX_Y) |
                      A4XX_TEX_CONST_0_SWIZ_Z(A4XX_TEX_Z) |
                      A4XX_TEX_CONST_0_SWIZ_W(A4XX_TEX_W);

         /* Remap integer formats as unorm (will be fixed up in shader) */
         if (util_format_is_pure_integer(view->base.format)) {
            texconst0 &= ~A4XX_TEX_CONST_0_FMT__MASK;
            switch (fd4_pipe2tex(view->base.format)) {
            case TFMT4_8_8_8_8_UINT:
            case TFMT4_8_8_8_8_SINT:
               texconst0 |= A4XX_TEX_CONST_0_FMT(TFMT4_8_8_8_8_UNORM);
               break;
            case TFMT4_8_8_UINT:
            case TFMT4_8_8_SINT:
               texconst0 |= A4XX_TEX_CONST_0_FMT(TFMT4_8_8_UNORM);
               break;
            case TFMT4_8_UINT:
            case TFMT4_8_SINT:
               texconst0 |= A4XX_TEX_CONST_0_FMT(TFMT4_8_UNORM);
               break;

            case TFMT4_16_16_16_16_UINT:
            case TFMT4_16_16_16_16_SINT:
               texconst0 |= A4XX_TEX_CONST_0_FMT(TFMT4_16_16_16_16_UNORM);
               break;
            case TFMT4_16_16_UINT:
            case TFMT4_16_16_SINT:
               texconst0 |= A4XX_TEX_CONST_0_FMT(TFMT4_16_16_UNORM);
               break;
            case TFMT4_16_UINT:
            case TFMT4_16_SINT:
               texconst0 |= A4XX_TEX_CONST_0_FMT(TFMT4_16_UNORM);
               break;

            case TFMT4_32_32_32_32_UINT:
            case TFMT4_32_32_32_32_SINT:
               texconst0 |= A4XX_TEX_CONST_0_FMT(TFMT4_32_32_32_32_FLOAT);
               break;
            case TFMT4_32_32_UINT:
            case TFMT4_32_32_SINT:
               texconst0 |= A4XX_TEX_CONST_0_FMT(TFMT4_32_32_FLOAT);
               break;
            case TFMT4_32_UINT:
            case TFMT4_32_SINT:
               texconst0 |= A4XX_TEX_CONST_0_FMT(TFMT4_32_FLOAT);
               break;

            case TFMT4_10_10_10_2_UINT:
               texconst0 |= A4XX_TEX_CONST_0_FMT(TFMT4_10_10_10_2_UNORM);
               break;

            default:
               assert(0);
            }
         }

         OUT_RING(ring, texconst0);
         OUT_RING(ring, view->texconst1);
         OUT_RING(ring, view->texconst2);
         OUT_RING(ring, view->texconst3);
         if (view->base.texture) {
            struct fd_resource *rsc = fd_resource(view->base.texture);
            OUT_RELOC(ring, rsc->bo, view->offset, view->texconst4, 0);
         } else {
            OUT_RING(ring, 0x00000000);
         }
         OUT_RING(ring, 0x00000000);
         OUT_RING(ring, 0x00000000);
         OUT_RING(ring, 0x00000000);
      }
   } else {
      assert(v->astc_srgb.count == 0);
      assert(v->tg4.count == 0);
   }

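   /* upload border colors and point the TP at them: */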
   if (needs_border) {
      unsigned off;
      void *ptr;

      u_upload_alloc(fd4_ctx->border_color_uploader, 0,
                     BORDER_COLOR_UPLOAD_SIZE, BORDER_COLOR_UPLOAD_SIZE, &off,
                     &fd4_ctx->border_color_buf, &ptr);

      fd_setup_border_colors(tex, ptr, 0);
      OUT_PKT0(ring, bcolor_reg[sb], 1);
      OUT_RELOC(ring, fd_resource(fd4_ctx->border_color_buf)->bo, off, 0, 0);

      u_upload_unmap(fd4_ctx->border_color_uploader);
   }
}

/* emit texture state for mem->gmem restore operation.. eventually it would
 * be good to get rid of this and use normal CSO/etc state for more of these
 * special cases..
 */
void
fd4_emit_gmem_restore_tex(struct fd_ringbuffer *ring, unsigned nr_bufs,
                          struct pipe_surface **bufs)
{
   unsigned char mrt_comp[A4XX_MAX_RENDER_TARGETS];
   int i;

   for (i = 0; i < A4XX_MAX_RENDER_TARGETS; i++) {
      mrt_comp[i] = (i < nr_bufs) ? 0xf : 0;
   }

   /* output sampler state: */
   OUT_PKT3(ring, CP_LOAD_STATE4, 2 + (2 * nr_bufs));
   OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(0) |
                     CP_LOAD_STATE4_0_STATE_SRC(SS4_DIRECT) |
                     CP_LOAD_STATE4_0_STATE_BLOCK(SB4_FS_TEX) |
                     CP_LOAD_STATE4_0_NUM_UNIT(nr_bufs));
   OUT_RING(ring, CP_LOAD_STATE4_1_STATE_TYPE(ST4_SHADER) |
                     CP_LOAD_STATE4_1_EXT_SRC_ADDR(0));
   for (i = 0; i < nr_bufs; i++) {
      OUT_RING(ring, A4XX_TEX_SAMP_0_XY_MAG(A4XX_TEX_NEAREST) |
                        A4XX_TEX_SAMP_0_XY_MIN(A4XX_TEX_NEAREST) |
                        A4XX_TEX_SAMP_0_WRAP_S(A4XX_TEX_CLAMP_TO_EDGE) |
                        A4XX_TEX_SAMP_0_WRAP_T(A4XX_TEX_CLAMP_TO_EDGE) |
                        A4XX_TEX_SAMP_0_WRAP_R(A4XX_TEX_REPEAT));
      OUT_RING(ring, 0x00000000);
   }

   /* emit texture state: */
   OUT_PKT3(ring, CP_LOAD_STATE4, 2 + (8 * nr_bufs));
   OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(0) |
                     CP_LOAD_STATE4_0_STATE_SRC(SS4_DIRECT) |
                     CP_LOAD_STATE4_0_STATE_BLOCK(SB4_FS_TEX) |
                     CP_LOAD_STATE4_0_NUM_UNIT(nr_bufs));
   OUT_RING(ring, CP_LOAD_STATE4_1_STATE_TYPE(ST4_CONSTANTS) |
                     CP_LOAD_STATE4_1_EXT_SRC_ADDR(0));
   for (i = 0; i < nr_bufs; i++) {
      if (bufs[i]) {
         struct fd_resource *rsc = fd_resource(bufs[i]->texture);
         enum pipe_format format = fd_gmem_restore_format(bufs[i]->format);

         /* The restore blit_zs shader expects stencil in sampler 0,
          * and depth in sampler 1
          */
         if (rsc->stencil && (i == 0)) {
            rsc = rsc->stencil;
            format = fd_gmem_restore_format(rsc->b.b.format);
         }

         /* note: PIPE_BUFFER disallowed for surfaces */
         unsigned lvl = bufs[i]->u.tex.level;
         unsigned offset =
            fd_resource_offset(rsc, lvl, bufs[i]->u.tex.first_layer);

         /* z32 restore is accomplished using depth write. If there is
          * no stencil component (ie. PIPE_FORMAT_Z32_FLOAT_S8X24_UINT)
          * then no render target:
          *
          * (The same applies for z32_s8x24, since for stencil sampler
          * state the above 'if' will replace 'format' with s8)
          */
         if ((format == PIPE_FORMAT_Z32_FLOAT) ||
             (format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT))
            mrt_comp[i] = 0;

         assert(bufs[i]->u.tex.first_layer == bufs[i]->u.tex.last_layer);

         OUT_RING(ring, A4XX_TEX_CONST_0_FMT(fd4_pipe2tex(format)) |
                           A4XX_TEX_CONST_0_TYPE(A4XX_TEX_2D) |
                           fd4_tex_swiz(format, PIPE_SWIZZLE_X, PIPE_SWIZZLE_Y,
                                        PIPE_SWIZZLE_Z, PIPE_SWIZZLE_W));
         OUT_RING(ring, A4XX_TEX_CONST_1_WIDTH(bufs[i]->width) |
                           A4XX_TEX_CONST_1_HEIGHT(bufs[i]->height));
         OUT_RING(ring, A4XX_TEX_CONST_2_PITCH(fd_resource_pitch(rsc, lvl)));
         OUT_RING(ring, 0x00000000);
         OUT_RELOC(ring, rsc->bo, offset, 0, 0);
         OUT_RING(ring, 0x00000000);
         OUT_RING(ring, 0x00000000);
         OUT_RING(ring, 0x00000000);
      } else {
         OUT_RING(ring, A4XX_TEX_CONST_0_FMT(0) |
                           A4XX_TEX_CONST_0_TYPE(A4XX_TEX_2D) |
                           A4XX_TEX_CONST_0_SWIZ_X(A4XX_TEX_ONE) |
                           A4XX_TEX_CONST_0_SWIZ_Y(A4XX_TEX_ONE) |
                           A4XX_TEX_CONST_0_SWIZ_Z(A4XX_TEX_ONE) |
                           A4XX_TEX_CONST_0_SWIZ_W(A4XX_TEX_ONE));
         OUT_RING(ring, A4XX_TEX_CONST_1_WIDTH(0) | A4XX_TEX_CONST_1_HEIGHT(0));
         OUT_RING(ring, A4XX_TEX_CONST_2_PITCH(0));
         OUT_RING(ring, 0x00000000);
         OUT_RING(ring, 0x00000000);
         OUT_RING(ring, 0x00000000);
         OUT_RING(ring, 0x00000000);
         OUT_RING(ring, 0x00000000);
      }
   }

   OUT_PKT0(ring, REG_A4XX_RB_RENDER_COMPONENTS, 1);
   OUT_RING(ring, A4XX_RB_RENDER_COMPONENTS_RT0(mrt_comp[0]) |
                     A4XX_RB_RENDER_COMPONENTS_RT1(mrt_comp[1]) |
                     A4XX_RB_RENDER_COMPONENTS_RT2(mrt_comp[2]) |
                     A4XX_RB_RENDER_COMPONENTS_RT3(mrt_comp[3]) |
                     A4XX_RB_RENDER_COMPONENTS_RT4(mrt_comp[4]) |
                     A4XX_RB_RENDER_COMPONENTS_RT5(mrt_comp[5]) |
                     A4XX_RB_RENDER_COMPONENTS_RT6(mrt_comp[6]) |
                     A4XX_RB_RENDER_COMPONENTS_RT7(mrt_comp[7]));
}

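/* emit ssbo state in two passes: first the buffer addresses, then the
 * buffer sizes (encoded as width in dwords, overflowing into height):
 */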
static void
emit_ssbos(struct fd_context *ctx, struct fd_ringbuffer *ring,
           enum a4xx_state_block sb, struct fd_shaderbuf_stateobj *so)
{
   unsigned count = util_last_bit(so->enabled_mask);

   if (count == 0)
      return;

   OUT_PKT3(ring, CP_LOAD_STATE4, 2 + (4 * count));
   OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(0) |
                     CP_LOAD_STATE4_0_STATE_SRC(SS4_DIRECT) |
                     CP_LOAD_STATE4_0_STATE_BLOCK(sb) |
                     CP_LOAD_STATE4_0_NUM_UNIT(count));
   OUT_RING(ring, CP_LOAD_STATE4_1_STATE_TYPE(0) |
                     CP_LOAD_STATE4_1_EXT_SRC_ADDR(0));
   for (unsigned i = 0; i < count; i++) {
      struct pipe_shader_buffer *buf = &so->sb[i];
      if (buf->buffer) {
         struct fd_resource *rsc = fd_resource(buf->buffer);
         OUT_RELOC(ring, rsc->bo, buf->buffer_offset, 0, 0);
      } else {
         OUT_RING(ring, 0x00000000);
      }
      OUT_RING(ring, 0x00000000);
      OUT_RING(ring, 0x00000000);
      OUT_RING(ring, 0x00000000);
   }

   OUT_PKT3(ring, CP_LOAD_STATE4, 2 + (2 * count));
   OUT_RING(ring, CP_LOAD_STATE4_0_DST_OFF(0) |
                     CP_LOAD_STATE4_0_STATE_SRC(SS4_DIRECT) |
                     CP_LOAD_STATE4_0_STATE_BLOCK(sb) |
                     CP_LOAD_STATE4_0_NUM_UNIT(count));
   OUT_RING(ring, CP_LOAD_STATE4_1_STATE_TYPE(1) |
                     CP_LOAD_STATE4_1_EXT_SRC_ADDR(0));
   for (unsigned i = 0; i < count; i++) {
      struct pipe_shader_buffer *buf = &so->sb[i];
      unsigned sz = buf->buffer_size;

      /* width is in dwords, overflows into height: */
      sz /= 4;

      OUT_RING(ring, A4XX_SSBO_1_0_WIDTH(sz));
      OUT_RING(ring, A4XX_SSBO_1_1_HEIGHT(sz >> 16));
   }
}

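/* emit VFD fetch/decode instructions for the enabled vertex buffers,
 * matched against the vertex shader's inputs and sysvals:
 */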
void
fd4_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd4_emit *emit)
{
   int32_t i, j, last = -1;
   uint32_t total_in = 0;
   const struct fd_vertex_state *vtx = emit->vtx;
   const struct ir3_shader_variant *vp = fd4_emit_get_vp(emit);
   unsigned vertex_regid = regid(63, 0);
   unsigned instance_regid = regid(63, 0);
   unsigned vtxcnt_regid = regid(63, 0);

   /* Note that sysvals come *after* normal inputs: */
   for (i = 0; i < vp->inputs_count; i++) {
      if (!vp->inputs[i].compmask)
         continue;
      if (vp->inputs[i].sysval) {
         switch (vp->inputs[i].slot) {
         case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE:
            vertex_regid = vp->inputs[i].regid;
            break;
         case SYSTEM_VALUE_INSTANCE_ID:
            instance_regid = vp->inputs[i].regid;
            break;
         case SYSTEM_VALUE_VERTEX_CNT:
            vtxcnt_regid = vp->inputs[i].regid;
            break;
         default:
            unreachable("invalid system value");
            break;
         }
      } else if (i < vtx->vtx->num_elements) {
         last = i;
      }
   }

   for (i = 0, j = 0; i <= last; i++) {
      assert(!vp->inputs[i].sysval);
      if (vp->inputs[i].compmask) {
         struct pipe_vertex_element *elem = &vtx->vtx->pipe[i];
         const struct pipe_vertex_buffer *vb =
            &vtx->vertexbuf.vb[elem->vertex_buffer_index];
         struct fd_resource *rsc = fd_resource(vb->buffer.resource);
         enum pipe_format pfmt = elem->src_format;
         enum a4xx_vtx_fmt fmt = fd4_pipe2vtx(pfmt);
         bool switchnext = (i != last) || (vertex_regid != regid(63, 0)) ||
                           (instance_regid != regid(63, 0)) ||
                           (vtxcnt_regid != regid(63, 0));
         bool isint = util_format_is_pure_integer(pfmt);
         uint32_t fs = util_format_get_blocksize(pfmt);
         uint32_t off = vb->buffer_offset + elem->src_offset;
         uint32_t size = vb->buffer.resource->width0 - off;
         assert(fmt != VFMT4_NONE);

         OUT_PKT0(ring, REG_A4XX_VFD_FETCH(j), 4);
         OUT_RING(ring, A4XX_VFD_FETCH_INSTR_0_FETCHSIZE(fs - 1) |
                           A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE(vb->stride) |
                           COND(elem->instance_divisor,
                                A4XX_VFD_FETCH_INSTR_0_INSTANCED) |
                           COND(switchnext, A4XX_VFD_FETCH_INSTR_0_SWITCHNEXT));
         OUT_RELOC(ring, rsc->bo, off, 0, 0);
         OUT_RING(ring, A4XX_VFD_FETCH_INSTR_2_SIZE(size));
         OUT_RING(ring, A4XX_VFD_FETCH_INSTR_3_STEPRATE(
                           MAX2(1, elem->instance_divisor)));

         OUT_PKT0(ring, REG_A4XX_VFD_DECODE_INSTR(j), 1);
         OUT_RING(ring,
                  A4XX_VFD_DECODE_INSTR_CONSTFILL |
                     A4XX_VFD_DECODE_INSTR_WRITEMASK(vp->inputs[i].compmask) |
                     A4XX_VFD_DECODE_INSTR_FORMAT(fmt) |
                     A4XX_VFD_DECODE_INSTR_SWAP(fd4_pipe2swap(pfmt)) |
                     A4XX_VFD_DECODE_INSTR_REGID(vp->inputs[i].regid) |
                     A4XX_VFD_DECODE_INSTR_SHIFTCNT(fs) |
                     A4XX_VFD_DECODE_INSTR_LASTCOMPVALID |
                     COND(isint, A4XX_VFD_DECODE_INSTR_INT) |
                     COND(switchnext, A4XX_VFD_DECODE_INSTR_SWITCHNEXT));

         total_in += util_bitcount(vp->inputs[i].compmask);
         j++;
      }
   }

   /* hw doesn't like to be configured for zero vbo's, it seems: */
   if (last < 0) {
      /* just recycle the shader bo, we just need to point to *something*
       * valid:
       */
      struct fd_bo *dummy_vbo = vp->bo;
      bool switchnext = (vertex_regid != regid(63, 0)) ||
                        (instance_regid != regid(63, 0)) ||
                        (vtxcnt_regid != regid(63, 0));

      OUT_PKT0(ring, REG_A4XX_VFD_FETCH(0), 4);
      OUT_RING(ring, A4XX_VFD_FETCH_INSTR_0_FETCHSIZE(0) |
                        A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE(0) |
                        COND(switchnext, A4XX_VFD_FETCH_INSTR_0_SWITCHNEXT));
      OUT_RELOC(ring, dummy_vbo, 0, 0, 0);
      OUT_RING(ring, A4XX_VFD_FETCH_INSTR_2_SIZE(1));
      OUT_RING(ring, A4XX_VFD_FETCH_INSTR_3_STEPRATE(1));

      OUT_PKT0(ring, REG_A4XX_VFD_DECODE_INSTR(0), 1);
      OUT_RING(ring, A4XX_VFD_DECODE_INSTR_CONSTFILL |
                        A4XX_VFD_DECODE_INSTR_WRITEMASK(0x1) |
                        A4XX_VFD_DECODE_INSTR_FORMAT(VFMT4_8_UNORM) |
                        A4XX_VFD_DECODE_INSTR_SWAP(XYZW) |
                        A4XX_VFD_DECODE_INSTR_REGID(regid(0, 0)) |
                        A4XX_VFD_DECODE_INSTR_SHIFTCNT(1) |
                        A4XX_VFD_DECODE_INSTR_LASTCOMPVALID |
                        COND(switchnext, A4XX_VFD_DECODE_INSTR_SWITCHNEXT));

      total_in = 1;
      j = 1;
   }

   OUT_PKT0(ring, REG_A4XX_VFD_CONTROL_0, 5);
   OUT_RING(ring, A4XX_VFD_CONTROL_0_TOTALATTRTOVS(total_in) |
                     0xa0000 | /* XXX */
                     A4XX_VFD_CONTROL_0_STRMDECINSTRCNT(j) |
                     A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(j));
   OUT_RING(ring, A4XX_VFD_CONTROL_1_MAXSTORAGE(129) | // XXX
                     A4XX_VFD_CONTROL_1_REGID4VTX(vertex_regid) |
                     A4XX_VFD_CONTROL_1_REGID4INST(instance_regid));
   OUT_RING(ring, 0x00000000); /* XXX VFD_CONTROL_2 */
   OUT_RING(ring, A4XX_VFD_CONTROL_3_REGID_VTXCNT(vtxcnt_regid));
   OUT_RING(ring, 0x00000000); /* XXX VFD_CONTROL_4 */

   /* cache invalidate, otherwise vertex fetch could see
    * stale vbo contents:
    */
   OUT_PKT0(ring, REG_A4XX_UCHE_INVALIDATE0, 2);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000012);
}

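/* emit whatever 3d state is dirty for the current draw (also used, with
 * skip_consts set, by the clear path):
 */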
void
fd4_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
               struct fd4_emit *emit)
{
   const struct ir3_shader_variant *vp = fd4_emit_get_vp(emit);
   const struct ir3_shader_variant *fp = fd4_emit_get_fp(emit);
   const enum fd_dirty_3d_state dirty = emit->dirty;

   emit_marker(ring, 5);

   if ((dirty & FD_DIRTY_FRAMEBUFFER) && !emit->binning_pass) {
      struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
      unsigned char mrt_comp[A4XX_MAX_RENDER_TARGETS] = {0};

      for (unsigned i = 0; i < A4XX_MAX_RENDER_TARGETS; i++) {
         mrt_comp[i] = ((i < pfb->nr_cbufs) && pfb->cbufs[i]) ? 0xf : 0;
      }

      OUT_PKT0(ring, REG_A4XX_RB_RENDER_COMPONENTS, 1);
      OUT_RING(ring, A4XX_RB_RENDER_COMPONENTS_RT0(mrt_comp[0]) |
                        A4XX_RB_RENDER_COMPONENTS_RT1(mrt_comp[1]) |
                        A4XX_RB_RENDER_COMPONENTS_RT2(mrt_comp[2]) |
                        A4XX_RB_RENDER_COMPONENTS_RT3(mrt_comp[3]) |
                        A4XX_RB_RENDER_COMPONENTS_RT4(mrt_comp[4]) |
                        A4XX_RB_RENDER_COMPONENTS_RT5(mrt_comp[5]) |
                        A4XX_RB_RENDER_COMPONENTS_RT6(mrt_comp[6]) |
                        A4XX_RB_RENDER_COMPONENTS_RT7(mrt_comp[7]));
   }

   if (dirty & (FD_DIRTY_ZSA | FD_DIRTY_FRAMEBUFFER)) {
      struct fd4_zsa_stateobj *zsa = fd4_zsa_stateobj(ctx->zsa);
      struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
      uint32_t rb_alpha_control = zsa->rb_alpha_control;

      if (util_format_is_pure_integer(pipe_surface_format(pfb->cbufs[0])))
         rb_alpha_control &= ~A4XX_RB_ALPHA_CONTROL_ALPHA_TEST;

      OUT_PKT0(ring, REG_A4XX_RB_ALPHA_CONTROL, 1);
      OUT_RING(ring, rb_alpha_control);

      OUT_PKT0(ring, REG_A4XX_RB_STENCIL_CONTROL, 2);
      OUT_RING(ring, zsa->rb_stencil_control);
      OUT_RING(ring, zsa->rb_stencil_control2);
   }

   if (dirty & (FD_DIRTY_ZSA | FD_DIRTY_STENCIL_REF)) {
      struct fd4_zsa_stateobj *zsa = fd4_zsa_stateobj(ctx->zsa);
      struct pipe_stencil_ref *sr = &ctx->stencil_ref;

      OUT_PKT0(ring, REG_A4XX_RB_STENCILREFMASK, 2);
      OUT_RING(ring, zsa->rb_stencilrefmask |
                        A4XX_RB_STENCILREFMASK_STENCILREF(sr->ref_value[0]));
      OUT_RING(ring, zsa->rb_stencilrefmask_bf |
                        A4XX_RB_STENCILREFMASK_BF_STENCILREF(sr->ref_value[1]));
   }

   if (dirty & (FD_DIRTY_ZSA | FD_DIRTY_RASTERIZER | FD_DIRTY_PROG)) {
      struct fd4_zsa_stateobj *zsa = fd4_zsa_stateobj(ctx->zsa);
      bool fragz = fp->no_earlyz || fp->has_kill || fp->writes_pos;
      bool latez = !fp->fs.early_fragment_tests && fragz;
      bool clamp = !ctx->rasterizer->depth_clip_near;

      OUT_PKT0(ring, REG_A4XX_RB_DEPTH_CONTROL, 1);
      OUT_RING(ring, zsa->rb_depth_control |
                        COND(clamp, A4XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE) |
                        COND(latez, A4XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE) |
                        COND(fragz && fp->fragcoord_compmask != 0,
                             A4XX_RB_DEPTH_CONTROL_FORCE_FRAGZ_TO_FS));

      /* maybe this register/bitfield needs a better name.. this
       * appears to be just disabling early-z
       */
      OUT_PKT0(ring, REG_A4XX_GRAS_ALPHA_CONTROL, 1);
      OUT_RING(ring, zsa->gras_alpha_control |
                        COND(latez, A4XX_GRAS_ALPHA_CONTROL_ALPHA_TEST_ENABLE) |
                        COND(fragz && fp->fragcoord_compmask != 0,
                             A4XX_GRAS_ALPHA_CONTROL_FORCE_FRAGZ_TO_FS));
   }

   if (dirty & FD_DIRTY_RASTERIZER) {
      struct fd4_rasterizer_stateobj *rasterizer =
         fd4_rasterizer_stateobj(ctx->rasterizer);

      OUT_PKT0(ring, REG_A4XX_GRAS_SU_MODE_CONTROL, 1);
      OUT_RING(ring, rasterizer->gras_su_mode_control |
                        A4XX_GRAS_SU_MODE_CONTROL_RENDERING_PASS);

      OUT_PKT0(ring, REG_A4XX_GRAS_SU_POINT_MINMAX, 2);
      OUT_RING(ring, rasterizer->gras_su_point_minmax);
      OUT_RING(ring, rasterizer->gras_su_point_size);

      OUT_PKT0(ring, REG_A4XX_GRAS_SU_POLY_OFFSET_SCALE, 3);
      OUT_RING(ring, rasterizer->gras_su_poly_offset_scale);
      OUT_RING(ring, rasterizer->gras_su_poly_offset_offset);
      OUT_RING(ring, rasterizer->gras_su_poly_offset_clamp);

      OUT_PKT0(ring, REG_A4XX_GRAS_CL_CLIP_CNTL, 1);
      OUT_RING(ring, rasterizer->gras_cl_clip_cntl);
   }

   /* NOTE: since primitive_restart is not actually part of any
    * state object, we need to make sure that we always emit
    * PRIM_VTX_CNTL.. either that or be more clever and detect
    * when it changes.
    */
   if (emit->info) {
      const struct pipe_draw_info *info = emit->info;
      struct fd4_rasterizer_stateobj *rast =
         fd4_rasterizer_stateobj(ctx->rasterizer);
      uint32_t val = rast->pc_prim_vtx_cntl;

      if (info->index_size && info->primitive_restart)
         val |= A4XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART;

      val |= COND(vp->writes_psize, A4XX_PC_PRIM_VTX_CNTL_PSIZE);

      if (fp->total_in > 0) {
         uint32_t varout = align(fp->total_in, 16) / 16;
         if (varout > 1)
            varout = align(varout, 2);
         val |= A4XX_PC_PRIM_VTX_CNTL_VAROUT(varout);
      }

      OUT_PKT0(ring, REG_A4XX_PC_PRIM_VTX_CNTL, 2);
      OUT_RING(ring, val);
      OUT_RING(ring, rast->pc_prim_vtx_cntl2);
   }

   /* NOTE: scissor enabled bit is part of rasterizer state: */
   if (dirty & (FD_DIRTY_SCISSOR | FD_DIRTY_RASTERIZER)) {
      struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);

      OUT_PKT0(ring, REG_A4XX_GRAS_SC_WINDOW_SCISSOR_BR, 2);
      OUT_RING(ring, A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X(scissor->maxx - 1) |
                        A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(scissor->maxy - 1));
      OUT_RING(ring, A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X(scissor->minx) |
                        A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(scissor->miny));

      ctx->batch->max_scissor.minx =
         MIN2(ctx->batch->max_scissor.minx, scissor->minx);
      ctx->batch->max_scissor.miny =
         MIN2(ctx->batch->max_scissor.miny, scissor->miny);
      ctx->batch->max_scissor.maxx =
         MAX2(ctx->batch->max_scissor.maxx, scissor->maxx);
      ctx->batch->max_scissor.maxy =
         MAX2(ctx->batch->max_scissor.maxy, scissor->maxy);
   }

   if (dirty & FD_DIRTY_VIEWPORT) {
      fd_wfi(ctx->batch, ring);
      OUT_PKT0(ring, REG_A4XX_GRAS_CL_VPORT_XOFFSET_0, 6);
      OUT_RING(ring, A4XX_GRAS_CL_VPORT_XOFFSET_0(ctx->viewport.translate[0]));
      OUT_RING(ring, A4XX_GRAS_CL_VPORT_XSCALE_0(ctx->viewport.scale[0]));
      OUT_RING(ring, A4XX_GRAS_CL_VPORT_YOFFSET_0(ctx->viewport.translate[1]));
      OUT_RING(ring, A4XX_GRAS_CL_VPORT_YSCALE_0(ctx->viewport.scale[1]));
      OUT_RING(ring, A4XX_GRAS_CL_VPORT_ZOFFSET_0(ctx->viewport.translate[2]));
      OUT_RING(ring, A4XX_GRAS_CL_VPORT_ZSCALE_0(ctx->viewport.scale[2]));
   }

   if (dirty &
       (FD_DIRTY_VIEWPORT | FD_DIRTY_RASTERIZER | FD_DIRTY_FRAMEBUFFER)) {
      float zmin, zmax;
      int depth = 24;
      if (ctx->batch->framebuffer.zsbuf) {
         depth = util_format_get_component_bits(
            pipe_surface_format(ctx->batch->framebuffer.zsbuf),
            UTIL_FORMAT_COLORSPACE_ZS, 0);
      }
      util_viewport_zmin_zmax(&ctx->viewport, ctx->rasterizer->clip_halfz,
                              &zmin, &zmax);

      OUT_PKT0(ring, REG_A4XX_RB_VPORT_Z_CLAMP(0), 2);
      if (depth == 32) {
         OUT_RING(ring, fui(zmin));
         OUT_RING(ring, fui(zmax));
      } else if (depth == 16) {
         OUT_RING(ring, (uint32_t)(zmin * 0xffff));
         OUT_RING(ring, (uint32_t)(zmax * 0xffff));
      } else {
         OUT_RING(ring, (uint32_t)(zmin * 0xffffff));
         OUT_RING(ring, (uint32_t)(zmax * 0xffffff));
      }
   }

   if (dirty & (FD_DIRTY_PROG | FD_DIRTY_FRAMEBUFFER)) {
      struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
      unsigned n = pfb->nr_cbufs;
      /* if we have depth/stencil, we need at least one MRT: */
      if (pfb->zsbuf)
         n = MAX2(1, n);
      fd4_program_emit(ring, emit, n, pfb->cbufs);
   }

   if (!emit->skip_consts) { /* evil hack to deal sanely with clear path */
      ir3_emit_vs_consts(vp, ring, ctx, emit->info, emit->indirect, emit->draw);
      if (!emit->binning_pass)
         ir3_emit_fs_consts(fp, ring, ctx);
   }

   if ((dirty & FD_DIRTY_BLEND)) {
      struct fd4_blend_stateobj *blend = fd4_blend_stateobj(ctx->blend);
      uint32_t i;

      for (i = 0; i < A4XX_MAX_RENDER_TARGETS; i++) {
         enum pipe_format format =
            pipe_surface_format(ctx->batch->framebuffer.cbufs[i]);
         bool is_int = util_format_is_pure_integer(format);
         bool has_alpha = util_format_has_alpha(format);
         uint32_t control = blend->rb_mrt[i].control;

         if (is_int) {
            control &= A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
            control |= A4XX_RB_MRT_CONTROL_ROP_CODE(ROP_COPY);
         }

         if (!has_alpha) {
            control &= ~A4XX_RB_MRT_CONTROL_BLEND2;
         }

         OUT_PKT0(ring, REG_A4XX_RB_MRT_CONTROL(i), 1);
         OUT_RING(ring, control);

         OUT_PKT0(ring, REG_A4XX_RB_MRT_BLEND_CONTROL(i), 1);
         OUT_RING(ring, blend->rb_mrt[i].blend_control);
      }

      OUT_PKT0(ring, REG_A4XX_RB_FS_OUTPUT, 1);
      OUT_RING(ring,
               blend->rb_fs_output | A4XX_RB_FS_OUTPUT_SAMPLE_MASK(0xffff));
   }

   if (dirty & FD_DIRTY_BLEND_COLOR) {
      struct pipe_blend_color *bcolor = &ctx->blend_color;

      OUT_PKT0(ring, REG_A4XX_RB_BLEND_RED, 8);
      OUT_RING(ring,
               A4XX_RB_BLEND_RED_FLOAT(bcolor->color[0]) |
                  A4XX_RB_BLEND_RED_UINT(CLAMP(bcolor->color[0], 0.f, 1.f) * 0xff) |
                  A4XX_RB_BLEND_RED_SINT(CLAMP(bcolor->color[0], -1.f, 1.f) * 0x7f));
      OUT_RING(ring, A4XX_RB_BLEND_RED_F32(bcolor->color[0]));
      OUT_RING(ring,
               A4XX_RB_BLEND_GREEN_FLOAT(bcolor->color[1]) |
                  A4XX_RB_BLEND_GREEN_UINT(CLAMP(bcolor->color[1], 0.f, 1.f) * 0xff) |
                  A4XX_RB_BLEND_GREEN_SINT(CLAMP(bcolor->color[1], -1.f, 1.f) * 0x7f));
      OUT_RING(ring, A4XX_RB_BLEND_GREEN_F32(bcolor->color[1]));
      OUT_RING(ring,
               A4XX_RB_BLEND_BLUE_FLOAT(bcolor->color[2]) |
                  A4XX_RB_BLEND_BLUE_UINT(CLAMP(bcolor->color[2], 0.f, 1.f) * 0xff) |
                  A4XX_RB_BLEND_BLUE_SINT(CLAMP(bcolor->color[2], -1.f, 1.f) * 0x7f));
      OUT_RING(ring, A4XX_RB_BLEND_BLUE_F32(bcolor->color[2]));
      OUT_RING(ring,
               A4XX_RB_BLEND_ALPHA_FLOAT(bcolor->color[3]) |
                  A4XX_RB_BLEND_ALPHA_UINT(CLAMP(bcolor->color[3], 0.f, 1.f) * 0xff) |
                  A4XX_RB_BLEND_ALPHA_SINT(CLAMP(bcolor->color[3], -1.f, 1.f) * 0x7f));
      OUT_RING(ring, A4XX_RB_BLEND_ALPHA_F32(bcolor->color[3]));
   }

   if (ctx->dirty_shader[PIPE_SHADER_VERTEX] & FD_DIRTY_SHADER_TEX)
      emit_textures(ctx, ring, SB4_VS_TEX, &ctx->tex[PIPE_SHADER_VERTEX], vp);

   if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_TEX)
      emit_textures(ctx, ring, SB4_FS_TEX, &ctx->tex[PIPE_SHADER_FRAGMENT], fp);

   if (!emit->binning_pass) {
      if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_SSBO)
         emit_ssbos(ctx, ring, SB4_SSBO, &ctx->shaderbuf[PIPE_SHADER_FRAGMENT]);

      if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_IMAGE)
         fd4_emit_images(ctx, ring, PIPE_SHADER_FRAGMENT, fp);
   }
}

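/* emit compute-shader related state (textures, ssbos, images): */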
void
fd4_emit_cs_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
                  struct ir3_shader_variant *cp)
{
   enum fd_dirty_shader_state dirty = ctx->dirty_shader[PIPE_SHADER_COMPUTE];
   unsigned num_textures = ctx->tex[PIPE_SHADER_COMPUTE].num_textures +
                           cp->astc_srgb.count + cp->tg4.count;

   if (dirty & FD_DIRTY_SHADER_TEX) {
      emit_textures(ctx, ring, SB4_CS_TEX, &ctx->tex[PIPE_SHADER_COMPUTE], cp);

      OUT_PKT0(ring, REG_A4XX_TPL1_TP_TEX_COUNT, 1);
      OUT_RING(ring, 0);
   }

   OUT_PKT0(ring, REG_A4XX_TPL1_TP_FS_TEX_COUNT, 1);
   OUT_RING(ring,
            A4XX_TPL1_TP_FS_TEX_COUNT_CS(
               ctx->shaderimg[PIPE_SHADER_COMPUTE].enabled_mask ? 0x80
                                                                : num_textures));

   if (dirty & FD_DIRTY_SHADER_SSBO)
      emit_ssbos(ctx, ring, SB4_CS_SSBO, &ctx->shaderbuf[PIPE_SHADER_COMPUTE]);

   if (dirty & FD_DIRTY_SHADER_IMAGE)
      fd4_emit_images(ctx, ring, PIPE_SHADER_COMPUTE, cp);
}

/* emit setup at the beginning of a new cmdstream buffer (don't rely on
 * previous state; there could have been a context switch between ioctls):
 */
void
fd4_emit_restore(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
   struct fd_context *ctx = batch->ctx;
   struct fd4_context *fd4_ctx = fd4_context(ctx);

   OUT_PKT0(ring, REG_A4XX_RBBM_PERFCTR_CTL, 1);
   OUT_RING(ring, 0x00000001);

   OUT_PKT0(ring, REG_A4XX_GRAS_DEBUG_ECO_CONTROL, 1);
   OUT_RING(ring, 0x00000000);

   OUT_PKT0(ring, REG_A4XX_SP_MODE_CONTROL, 1);
   OUT_RING(ring, 0x0000001e);

   OUT_PKT0(ring, REG_A4XX_TPL1_TP_MODE_CONTROL, 1);
   OUT_RING(ring, 0x0000003a);

   OUT_PKT0(ring, REG_A4XX_UNKNOWN_0D01, 1);
   OUT_RING(ring, 0x00000001);

   OUT_PKT0(ring, REG_A4XX_UNKNOWN_0E42, 1);
   OUT_RING(ring, 0x00000000);

   OUT_PKT0(ring, REG_A4XX_UCHE_CACHE_WAYS_VFD, 1);
   OUT_RING(ring, 0x00000007);

   OUT_PKT0(ring, REG_A4XX_UCHE_CACHE_MODE_CONTROL, 1);
   OUT_RING(ring, 0x00000000);

   OUT_PKT0(ring, REG_A4XX_UCHE_INVALIDATE0, 2);
   OUT_RING(ring, 0x00000000);
   OUT_RING(ring, 0x00000012);

   OUT_PKT0(ring, REG_A4XX_HLSQ_MODE_CONTROL, 1);
   OUT_RING(ring, 0x00000003);

   OUT_PKT0(ring, REG_A4XX_UNKNOWN_0CC5, 1);
   OUT_RING(ring, 0x00000006);

   OUT_PKT0(ring, REG_A4XX_UNKNOWN_0CC6, 1);
   OUT_RING(ring, 0x00000000);

   OUT_PKT0(ring, REG_A4XX_UNKNOWN_0EC2, 1);
   OUT_RING(ring, 0x00040000);

   OUT_PKT0(ring, REG_A4XX_UNKNOWN_2001, 1);
   OUT_RING(ring, 0x00000000);

   OUT_PKT3(ring, CP_INVALIDATE_STATE, 1);
   OUT_RING(ring, 0x00001000);

   OUT_PKT0(ring, REG_A4XX_UNKNOWN_20EF, 1);
   OUT_RING(ring, 0x00000000);

   OUT_PKT0(ring, REG_A4XX_RB_BLEND_RED, 4);
   OUT_RING(ring, A4XX_RB_BLEND_RED_UINT(0) | A4XX_RB_BLEND_RED_FLOAT(0.0f));
   OUT_RING(ring, A4XX_RB_BLEND_GREEN_UINT(0) | A4XX_RB_BLEND_GREEN_FLOAT(0.0f));
   OUT_RING(ring, A4XX_RB_BLEND_BLUE_UINT(0) | A4XX_RB_BLEND_BLUE_FLOAT(0.0f));
   OUT_RING(ring,
            A4XX_RB_BLEND_ALPHA_UINT(0x7fff) | A4XX_RB_BLEND_ALPHA_FLOAT(1.0f));

   OUT_PKT0(ring, REG_A4XX_UNKNOWN_2152, 1);
   OUT_RING(ring, 0x00000000);

   OUT_PKT0(ring, REG_A4XX_UNKNOWN_2153, 1);
   OUT_RING(ring, 0x00000000);

   OUT_PKT0(ring, REG_A4XX_UNKNOWN_2154, 1);
   OUT_RING(ring, 0x00000000);

   OUT_PKT0(ring, REG_A4XX_UNKNOWN_2155, 1);
   OUT_RING(ring, 0x00000000);

   OUT_PKT0(ring, REG_A4XX_UNKNOWN_2156, 1);
   OUT_RING(ring, 0x00000000);

   OUT_PKT0(ring, REG_A4XX_UNKNOWN_2157, 1);
   OUT_RING(ring, 0x00000000);

   OUT_PKT0(ring, REG_A4XX_UNKNOWN_21C3, 1);
   OUT_RING(ring, 0x0000001d);

   OUT_PKT0(ring, REG_A4XX_PC_GS_PARAM, 1);
   OUT_RING(ring, 0x00000000);

   OUT_PKT0(ring, REG_A4XX_UNKNOWN_21E6, 1);
   OUT_RING(ring, 0x00000001);

   OUT_PKT0(ring, REG_A4XX_PC_HS_PARAM, 1);
   OUT_RING(ring, 0x00000000);

   OUT_PKT0(ring, REG_A4XX_UNKNOWN_22D7, 1);
   OUT_RING(ring, 0x00000000);

   OUT_PKT0(ring, REG_A4XX_TPL1_TP_TEX_OFFSET, 1);
   OUT_RING(ring, 0x00000000);

   OUT_PKT0(ring, REG_A4XX_TPL1_TP_TEX_COUNT, 1);
   OUT_RING(ring, A4XX_TPL1_TP_TEX_COUNT_VS(16) | A4XX_TPL1_TP_TEX_COUNT_HS(0) |
                     A4XX_TPL1_TP_TEX_COUNT_DS(0) |
                     A4XX_TPL1_TP_TEX_COUNT_GS(0));

   OUT_PKT0(ring, REG_A4XX_TPL1_TP_FS_TEX_COUNT, 1);
   OUT_RING(ring, 16);

   /* we don't use this yet.. probably best to disable.. */
   OUT_PKT3(ring, CP_SET_DRAW_STATE, 2);
   OUT_RING(ring, CP_SET_DRAW_STATE__0_COUNT(0) |
                     CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS |
                     CP_SET_DRAW_STATE__0_GROUP_ID(0));
   OUT_RING(ring, CP_SET_DRAW_STATE__1_ADDR_LO(0));

   OUT_PKT0(ring, REG_A4XX_SP_VS_PVT_MEM_PARAM, 2);
   OUT_RING(ring, 0x08000001);                    /* SP_VS_PVT_MEM_PARAM */
   OUT_RELOC(ring, fd4_ctx->vs_pvt_mem, 0, 0, 0); /* SP_VS_PVT_MEM_ADDR */

   OUT_PKT0(ring, REG_A4XX_SP_FS_PVT_MEM_PARAM, 2);
   OUT_RING(ring, 0x08000001);                    /* SP_FS_PVT_MEM_PARAM */
   OUT_RELOC(ring, fd4_ctx->fs_pvt_mem, 0, 0, 0); /* SP_FS_PVT_MEM_ADDR */

   OUT_PKT0(ring, REG_A4XX_GRAS_SC_CONTROL, 1);
   OUT_RING(ring, A4XX_GRAS_SC_CONTROL_RENDER_MODE(RB_RENDERING_PASS) |
                     A4XX_GRAS_SC_CONTROL_MSAA_DISABLE |
                     A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES(MSAA_ONE) |
                     A4XX_GRAS_SC_CONTROL_RASTER_MODE(0));

   OUT_PKT0(ring, REG_A4XX_RB_MSAA_CONTROL, 1);
   OUT_RING(ring, A4XX_RB_MSAA_CONTROL_DISABLE |
                     A4XX_RB_MSAA_CONTROL_SAMPLES(MSAA_ONE));

   OUT_PKT0(ring, REG_A4XX_GRAS_CL_GB_CLIP_ADJ, 1);
   OUT_RING(ring, A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ(0) |
                     A4XX_GRAS_CL_GB_CLIP_ADJ_VERT(0));

   OUT_PKT0(ring, REG_A4XX_RB_ALPHA_CONTROL, 1);
   OUT_RING(ring, A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(FUNC_ALWAYS));

   OUT_PKT0(ring, REG_A4XX_RB_FS_OUTPUT, 1);
   OUT_RING(ring, A4XX_RB_FS_OUTPUT_SAMPLE_MASK(0xffff));

   OUT_PKT0(ring, REG_A4XX_GRAS_ALPHA_CONTROL, 1);
   OUT_RING(ring, 0x0);

   fd_hw_query_enable(batch, ring);
}

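/* dword-at-a-time copy between two buffers, using one CP_MEM_TO_MEM
 * packet per dword:
 */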
static void
fd4_mem_to_mem(struct fd_ringbuffer *ring, struct pipe_resource *dst,
               unsigned dst_off, struct pipe_resource *src, unsigned src_off,
               unsigned sizedwords)
{
   struct fd_bo *src_bo = fd_resource(src)->bo;
   struct fd_bo *dst_bo = fd_resource(dst)->bo;
   unsigned i;

   for (i = 0; i < sizedwords; i++) {
      OUT_PKT3(ring, CP_MEM_TO_MEM, 3);
      OUT_RING(ring, 0x00000000);
      OUT_RELOC(ring, dst_bo, dst_off, 0, 0);
      OUT_RELOC(ring, src_bo, src_off, 0, 0);

      dst_off += 4;
      src_off += 4;
   }
}

void
fd4_emit_init_screen(struct pipe_screen *pscreen)
{
   struct fd_screen *screen = fd_screen(pscreen);

   screen->emit_ib = fd4_emit_ib;
   screen->mem_to_mem = fd4_mem_to_mem;
}

void
fd4_emit_init(struct pipe_context *pctx)
{
}