/*
 * Copyright (c) 2012-2015 Etnaviv Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Wladimir J. van der Laan <laanwj@gmail.com>
 *    Christian Gmeiner <christian.gmeiner@gmail.com>
 */

#include "etnaviv_context.h"

#include "etnaviv_blend.h"
#include "etnaviv_clear_blit.h"
#include "etnaviv_compiler.h"
#include "etnaviv_debug.h"
#include "etnaviv_emit.h"
#include "etnaviv_fence.h"
#include "etnaviv_ml.h"
#include "etnaviv_query.h"
#include "etnaviv_query_acc.h"
#include "etnaviv_rasterizer.h"
#include "etnaviv_resource.h"
#include "etnaviv_screen.h"
#include "etnaviv_shader.h"
#include "etnaviv_state.h"
#include "etnaviv_surface.h"
#include "etnaviv_texture.h"
#include "etnaviv_transfer.h"
#include "etnaviv_translate.h"
#include "etnaviv_zsa.h"

#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "util/hash_table.h"
#include "util/u_blitter.h"
#include "util/u_draw.h"
#include "util/u_helpers.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_prim.h"
#include "util/u_upload_mgr.h"
#include "util/u_debug_cb.h"
#include "util/u_surface.h"
#include "util/u_transfer.h"

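/* Emit a two-word FE NOP command; the second word carries an arbitrary
 * payload that the front end ignores, which allows embedding raw data in
 * the command stream.
 */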
static inline void
etna_emit_nop_with_data(struct etna_cmd_stream *stream, uint32_t value)
{
   etna_cmd_stream_emit(stream, VIV_FE_NOP_HEADER_OP_NOP);
   etna_cmd_stream_emit(stream, value);
}

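/* Implementation of pipe_context::emit_string_marker: embed the marker
 * string in the command stream, four bytes at a time, as NOP payload data.
 */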
static void
etna_emit_string_marker(struct pipe_context *pctx, const char *string, int len)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_cmd_stream *stream = ctx->stream;
   const uint32_t *buf = (const void *)string;

   etna_cmd_stream_reserve(stream, len * 2);

   while (len >= 4) {
      etna_emit_nop_with_data(stream, *buf);
      buf++;
      len -= 4;
   }

   /* copy remainder bytes without reading past end of input string */
   if (len > 0) {
      uint32_t w = 0;
      memcpy(&w, buf, len);
      etna_emit_nop_with_data(stream, w);
   }
}

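/* Implementation of pipe_context::set_frontend_noop: flush pending work and
 * toggle no-op mode; the flag is handed to etna_cmd_stream_flush() so that
 * later command streams can be dropped rather than executed.
 */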
static void
etna_set_frontend_noop(struct pipe_context *pctx, bool enable)
{
   struct etna_context *ctx = etna_context(pctx);

   pctx->flush(pctx, NULL, 0);
   ctx->is_noop = enable;
}

static void
etna_context_destroy(struct pipe_context *pctx)
{
   struct etna_context *ctx = etna_context(pctx);

   if (ctx->pending_resources)
      _mesa_hash_table_destroy(ctx->pending_resources, NULL);

   if (ctx->updated_resources)
      _mesa_set_destroy(ctx->updated_resources, NULL);

   if (ctx->flush_resources)
      _mesa_set_destroy(ctx->flush_resources, NULL);

   util_copy_framebuffer_state(&ctx->framebuffer_s, NULL);

   if (ctx->blitter)
      util_blitter_destroy(ctx->blitter);

   if (pctx->stream_uploader)
      u_upload_destroy(pctx->stream_uploader);

   if (ctx->stream)
      etna_cmd_stream_del(ctx->stream);

   etna_texture_fini(pctx);

   slab_destroy_child(&ctx->transfer_pool);

   if (ctx->in_fence_fd != -1)
      close(ctx->in_fence_fd);

   FREE(pctx);
}

/* Update render state where needed based on draw operation */
static void
etna_update_state_for_draw(struct etna_context *ctx, const struct pipe_draw_info *info)
{
   /* Handle primitive restart:
    * - If not an indexed draw, we don't care about the state of the primitive restart bit.
    * - Otherwise, set the bit in INDEX_STREAM_CONTROL in the index buffer state
    *   accordingly
    * - If the value of the INDEX_STREAM_CONTROL register changed due to this, or
    *   primitive restart is enabled and the restart index changed, mark the index
    *   buffer state as dirty
    */

   if (info->index_size) {
      uint32_t new_control = ctx->index_buffer.FE_INDEX_STREAM_CONTROL;

      if (info->primitive_restart)
         new_control |= VIVS_FE_INDEX_STREAM_CONTROL_PRIMITIVE_RESTART;
      else
         new_control &= ~VIVS_FE_INDEX_STREAM_CONTROL_PRIMITIVE_RESTART;

      if (ctx->index_buffer.FE_INDEX_STREAM_CONTROL != new_control ||
          (info->primitive_restart && ctx->index_buffer.FE_PRIMITIVE_RESTART_INDEX != info->restart_index)) {
         ctx->index_buffer.FE_INDEX_STREAM_CONTROL = new_control;
         ctx->index_buffer.FE_PRIMITIVE_RESTART_INDEX = info->restart_index;
         ctx->dirty |= ETNA_DIRTY_INDEX_BUFFER;
      }
   }
}

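/* Select (and if necessary compile) the vertex shader variant for the given
 * key; flag the shader state dirty when the active variant changes.
 */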
static bool
etna_get_vs(struct etna_context *ctx, struct etna_shader_key* const key)
{
   const struct etna_shader_variant *old = ctx->shader.vs;

   ctx->shader.vs = etna_shader_variant(ctx->shader.bind_vs, key, &ctx->base.debug, true);

   if (!ctx->shader.vs)
      return false;

   if (old != ctx->shader.vs)
      ctx->dirty |= ETNA_DIRTY_SHADER;

   return true;
}

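/* Select (and if necessary compile) the fragment shader variant for the given
 * key. On pre-HALTI2 hardware the key is first extended with the swizzles and
 * compare functions needed to lower shadow sampler accesses in the shader.
 */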
static bool
etna_get_fs(struct etna_context *ctx, struct etna_shader_key* const key)
{
   const struct etna_shader_variant *old = ctx->shader.fs;

   /* update the key if we need to run nir_lower_sample_tex_compare(..). */
   if (ctx->screen->info->halti < 2 &&
       (ctx->dirty & (ETNA_DIRTY_SAMPLERS | ETNA_DIRTY_SAMPLER_VIEWS))) {

      for (unsigned int i = 0; i < ctx->num_fragment_sampler_views; i++) {
         if (ctx->sampler[i]->compare_mode == PIPE_TEX_COMPARE_NONE)
            continue;

         key->has_sample_tex_compare = 1;
         key->num_texture_states = ctx->num_fragment_sampler_views;

         key->tex_swizzle[i].swizzle_r = ctx->sampler_view[i]->swizzle_r;
         key->tex_swizzle[i].swizzle_g = ctx->sampler_view[i]->swizzle_g;
         key->tex_swizzle[i].swizzle_b = ctx->sampler_view[i]->swizzle_b;
         key->tex_swizzle[i].swizzle_a = ctx->sampler_view[i]->swizzle_a;

         key->tex_compare_func[i] = ctx->sampler[i]->compare_func;
      }
   }

   ctx->shader.fs = etna_shader_variant(ctx->shader.bind_fs, key, &ctx->base.debug, true);

   if (!ctx->shader.fs)
      return false;

   if (old != ctx->shader.fs)
      ctx->dirty |= ETNA_DIRTY_SHADER;

   return true;
}

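/* Implementation of pipe_context::draw_vbo: validate the draw parameters,
 * set up the index buffer and shader variants, record which resources are
 * read or written, then emit the dirty state followed by the draw command.
 */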
static void
etna_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info,
              unsigned drawid_offset,
              const struct pipe_draw_indirect_info *indirect,
              const struct pipe_draw_start_count_bias *draws,
              unsigned num_draws)
{
   if (num_draws > 1) {
      util_draw_multi(pctx, info, drawid_offset, indirect, draws, num_draws);
      return;
   }

   if (!indirect && (!draws[0].count || !info->instance_count))
      return;

   struct etna_context *ctx = etna_context(pctx);
   struct etna_screen *screen = ctx->screen;
   struct pipe_framebuffer_state *pfb = &ctx->framebuffer_s;
   uint32_t draw_mode;
   unsigned i;

   if (!indirect &&
       !info->primitive_restart &&
       !u_trim_pipe_prim(info->mode, (unsigned*)&draws[0].count))
      return;

   if (ctx->vertex_elements == NULL || ctx->vertex_elements->num_elements == 0)
      return; /* Nothing to do */

   if (unlikely(ctx->rasterizer->cull_face == PIPE_FACE_FRONT_AND_BACK &&
                u_decomposed_prim(info->mode) == MESA_PRIM_TRIANGLES))
      return;

   if (!etna_render_condition_check(pctx))
      return;

   int prims = u_decomposed_prims_for_vertices(info->mode, draws[0].count);
   if (!indirect && unlikely(prims <= 0)) {
      DBG("Invalid draw primitive mode=%i or no primitives to be drawn", info->mode);
      return;
   }

   draw_mode = translate_draw_mode(info->mode);
   if (draw_mode == ETNA_NO_MATCH) {
      BUG("Unsupported draw mode");
      return;
   }

   /* Upload a user index buffer. */
   unsigned index_offset = 0;
   struct pipe_resource *indexbuf = NULL;

   if (info->index_size) {
      indexbuf = info->has_user_indices ? NULL : info->index.resource;
      if (info->has_user_indices &&
          !util_upload_index_buffer(pctx, info, &draws[0], &indexbuf, &index_offset, 4)) {
         BUG("Index buffer upload failed.");
         return;
      }
      /* Add start to index offset, when rendering indexed */
      index_offset += draws[0].start * info->index_size;

      ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.bo = etna_resource(indexbuf)->bo;
      ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.offset = index_offset;
      ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.flags = ETNA_RELOC_READ;
      ctx->index_buffer.FE_INDEX_STREAM_CONTROL = translate_index_size(info->index_size);

      if (!ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.bo) {
         BUG("Unsupported or no index buffer");
         return;
      }
   } else {
      ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.bo = 0;
      ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.offset = 0;
      ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.flags = 0;
      ctx->index_buffer.FE_INDEX_STREAM_CONTROL = 0;
   }
   ctx->dirty |= ETNA_DIRTY_INDEX_BUFFER;

   struct etna_shader_key key = {
      .front_ccw = ctx->rasterizer->front_ccw,
      .sprite_coord_enable = ctx->rasterizer->sprite_coord_enable,
      .sprite_coord_yinvert = !!ctx->rasterizer->sprite_coord_mode,
   };

   if (screen->info->halti >= 5)
      key.flatshade = ctx->rasterizer->flatshade;

   for (i = 0; i < pfb->nr_cbufs; i++) {
      if (pfb->cbufs[i])
         key.frag_rb_swap |= !!translate_pe_format_rb_swap(pfb->cbufs[i]->format) << i;
   }

   if (!etna_get_vs(ctx, &key) || !etna_get_fs(ctx, &key)) {
      BUG("compiled shaders are not okay");
      return;
   }

   /* Update any derived state */
   if (!etna_state_update(ctx))
      return;

   /*
    * Figure out the buffers/features we need:
    */
   if (ctx->dirty & ETNA_DIRTY_ZSA) {
      if (etna_depth_enabled(ctx))
         resource_written(ctx, pfb->zsbuf->texture);

      if (etna_stencil_enabled(ctx))
         resource_written(ctx, pfb->zsbuf->texture);
   }

   if (ctx->dirty & ETNA_DIRTY_FRAMEBUFFER) {
      for (i = 0; i < pfb->nr_cbufs; i++) {
         struct pipe_resource *surf;

         if (!pfb->cbufs[i])
            continue;

         surf = pfb->cbufs[i]->texture;
         resource_written(ctx, surf);
      }
   }

   if (ctx->dirty & ETNA_DIRTY_SHADER) {
      /* Mark constant buffers as being read */
      u_foreach_bit(i, ctx->constant_buffer[PIPE_SHADER_VERTEX].enabled_mask)
         resource_read(ctx, ctx->constant_buffer[PIPE_SHADER_VERTEX].cb[i].buffer);

      u_foreach_bit(i, ctx->constant_buffer[PIPE_SHADER_FRAGMENT].enabled_mask)
         resource_read(ctx, ctx->constant_buffer[PIPE_SHADER_FRAGMENT].cb[i].buffer);
   }

   if (ctx->dirty & ETNA_DIRTY_VERTEX_BUFFERS) {
      /* Mark VBOs as being read */
      u_foreach_bit(i, ctx->vertex_buffer.enabled_mask) {
         assert(!ctx->vertex_buffer.vb[i].is_user_buffer);
         resource_read(ctx, ctx->vertex_buffer.vb[i].buffer.resource);
      }
   }

   if (ctx->dirty & ETNA_DIRTY_INDEX_BUFFER) {
      /* Mark index buffer as being read */
      resource_read(ctx, indexbuf);
   }

   /* Mark textures as being read */
   for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
      if (ctx->sampler_view[i]) {
         if (ctx->dirty & ETNA_DIRTY_SAMPLER_VIEWS)
            resource_read(ctx, ctx->sampler_view[i]->texture);

         /* if texture was modified since the last update,
          * we need to clear the texture cache and possibly
          * resolve/update ts
          */
         etna_update_sampler_source(ctx->sampler_view[i], i);
      }
   }

   if (indirect) {
      /*
       * When the indirect buffer is written by the GPU, e.g. by a compute shader,
       * the shader L1 cache needs to be flushed for the data to become visible to
       * the FE. Also there needs to be a PE/FE stall enforced between commands
       * that generate the indirect buffer content and the indirect draw.
       *
       * This isn't implemented right now, so we don't support GPU written indirect buffers for now.
       */
      assert(!(etna_resource_status(ctx, etna_resource(indirect->buffer)) & ETNA_PENDING_WRITE));
      resource_read(ctx, indirect->buffer);
   }

   ctx->stats.prims_generated += u_reduced_prims_for_vertices(info->mode, draws[0].count);
   ctx->stats.draw_calls++;

   /* Update state for this draw operation */
   etna_update_state_for_draw(ctx, info);

   /* First, sync state, then emit DRAW_PRIMITIVES or DRAW_INDEXED_PRIMITIVES */
   etna_emit_state(ctx);

   if (!VIV_FEATURE(screen, ETNA_FEATURE_NEW_GPIPE)) {
      switch (draw_mode) {
      case PRIMITIVE_TYPE_LINE_LOOP:
      case PRIMITIVE_TYPE_LINE_STRIP:
      case PRIMITIVE_TYPE_TRIANGLE_STRIP:
      case PRIMITIVE_TYPE_TRIANGLE_FAN:
         etna_set_state(ctx->stream, VIVS_GL_VERTEX_ELEMENT_CONFIG,
                        VIVS_GL_VERTEX_ELEMENT_CONFIG_UNK0 |
                        VIVS_GL_VERTEX_ELEMENT_CONFIG_REUSE);
         break;
      default:
         etna_set_state(ctx->stream, VIVS_GL_VERTEX_ELEMENT_CONFIG,
                        VIVS_GL_VERTEX_ELEMENT_CONFIG_UNK0);
         break;
      }
   }

   if (indirect) {
      etna_draw_indirect(ctx->stream, draw_mode, indirect->buffer, indirect->offset, info->index_size);
   } else {
      if (screen->info->halti >= 2) {
         /* On HALTI2+ (GC3000 and higher) only use instanced drawing commands, as the blob does */
         etna_draw_instanced(ctx->stream, info->index_size, draw_mode, info->instance_count,
                             draws[0].count, info->index_size ? draws->index_bias : draws[0].start);
      } else {
         if (info->index_size)
            etna_draw_indexed_primitives(ctx->stream, draw_mode, 0, prims, draws->index_bias);
         else
            etna_draw_primitives(ctx->stream, draw_mode, draws[0].start, prims);
      }
   }

   if (DBG_ENABLED(ETNA_DBG_DRAW_STALL)) {
      /* Stall the FE after every draw operation. This allows better
       * debug of GPU hang conditions, as the FE will indicate which
       * draw op has caused the hang. */
      etna_stall(ctx->stream, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
   }

   if (DBG_ENABLED(ETNA_DBG_FLUSH_ALL))
      pctx->flush(pctx, NULL, 0);

   for (i = 0; i < pfb->nr_cbufs; i++) {
      if (pfb->cbufs[i])
         etna_resource_level_mark_changed(etna_surface(pfb->cbufs[i])->level);
   }

   if (ctx->framebuffer_s.zsbuf)
      etna_resource_level_mark_changed(etna_surface(ctx->framebuffer_s.zsbuf)->level);
   if (info->index_size && indexbuf != info->index.resource)
      pipe_resource_reference(&indexbuf, NULL);
}

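/* Emit the baseline GPU state at the start of a command buffer. This is done
 * at context creation and again after every flush; afterwards all tracked
 * state is marked dirty so it is re-emitted on the next draw.
 */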
static void
etna_reset_gpu_state(struct etna_context *ctx)
{
   struct etna_cmd_stream *stream = ctx->stream;
   struct etna_screen *screen = ctx->screen;
   uint32_t dummy_attribs[VIVS_NFE_GENERIC_ATTRIB__LEN] = { 0 };

   if (ctx->compute_only) {
      /* compute only context does not make use of any of the dirty state tracking. */
      assert(ctx->dirty == 0);
      assert(ctx->dirty_sampler_views == 0);
      assert(ctx->prev_active_samplers == 0);

      etna_cmd_stream_mark_end_of_context_init(stream);

      return;
   }

   etna_set_state(stream, VIVS_GL_API_MODE, VIVS_GL_API_MODE_OPENGL);
   etna_set_state(stream, VIVS_PA_W_CLIP_LIMIT, 0x34000001);
   etna_set_state(stream, VIVS_PA_FLAGS, 0x00000000); /* blob sets ZCONVERT_BYPASS on GC3000+, this messes up z for us */
   etna_set_state(stream, VIVS_PA_VIEWPORT_UNK00A80, 0x38a01404);
   etna_set_state(stream, VIVS_PA_VIEWPORT_UNK00A84, fui(8192.0));
   etna_set_state(stream, VIVS_PA_ZFARCLIPPING, 0x00000000);
   etna_set_state(stream, VIVS_RA_HDEPTH_CONTROL, 0x00007000);
   etna_set_state(stream, VIVS_PS_CONTROL_EXT, 0x00000000);

   /* There is no HALTI0 specific state */
   if (screen->info->halti >= 1) { /* Only on HALTI1+ */
      etna_set_state(stream, VIVS_VS_HALTI1_UNK00884, 0x00000808);
   }
   if (screen->info->halti >= 2) { /* Only on HALTI2+ */
      etna_set_state(stream, VIVS_RA_UNK00E0C, 0x00000000);
   }
   if (screen->info->halti >= 3) { /* Only on HALTI3+ */
      etna_set_state(stream, VIVS_PS_HALTI3_UNK0103C, 0x76543210);
   }
   if (screen->info->halti >= 4) { /* Only on HALTI4+ */
      etna_set_state(stream, VIVS_PS_MSAA_CONFIG, 0x6fffffff & 0xf70fffff & 0xfff6ffff &
                                                  0xffff6fff & 0xfffff6ff & 0xffffff7f);
      etna_set_state(stream, VIVS_PE_HALTI4_UNK014C0, 0x00000000);
   }
   if (screen->info->halti >= 5) { /* Only on HALTI5+ */
      etna_set_state(stream, VIVS_NTE_DESCRIPTOR_CONTROL,
                     COND(!DBG_ENABLED(ETNA_DBG_NO_TEXDESC), VIVS_NTE_DESCRIPTOR_CONTROL_ENABLE));
      etna_set_state(stream, VIVS_FE_HALTI5_UNK007D8, 0x00000002);
      etna_set_state(stream, VIVS_PS_SAMPLER_BASE, 0x00000000);
      etna_set_state(stream, VIVS_VS_SAMPLER_BASE, 0x00000020);
      etna_set_state(stream, VIVS_SH_CONFIG, VIVS_SH_CONFIG_RTNE_ROUNDING);
   }

   if (VIV_FEATURE(screen, ETNA_FEATURE_BUG_FIXES18))
      etna_set_state(stream, VIVS_GL_BUG_FIXES, 0x6);

   if (!screen->specs.use_blt) {
      /* Enable SINGLE_BUFFER for resolve, if supported */
      etna_set_state(stream, VIVS_RS_SINGLE_BUFFER, COND(screen->specs.single_buffer, VIVS_RS_SINGLE_BUFFER_ENABLE));
   }

   if (screen->info->halti >= 5 && !DBG_ENABLED(ETNA_DBG_NO_TEXDESC)) {
      /* TXDESC cache flush - do this once at the beginning, as texture
       * descriptors are only written by the CPU once, then patched by the kernel
       * before command stream submission. It does not need flushing if the
       * referenced image data changes.
       */
      etna_set_state(stream, VIVS_NTE_DESCRIPTOR_FLUSH, 0);
      etna_set_state(stream, VIVS_GL_FLUSH_CACHE,
                     VIVS_GL_FLUSH_CACHE_DESCRIPTOR_UNK12 |
                     VIVS_GL_FLUSH_CACHE_DESCRIPTOR_UNK13);

      /* Icache invalidate (should do this on shader change?) */
      etna_set_state(stream, VIVS_VS_ICACHE_INVALIDATE,
                     VIVS_VS_ICACHE_INVALIDATE_UNK0 | VIVS_VS_ICACHE_INVALIDATE_UNK1 |
                     VIVS_VS_ICACHE_INVALIDATE_UNK2 | VIVS_VS_ICACHE_INVALIDATE_UNK3 |
                     VIVS_VS_ICACHE_INVALIDATE_UNK4);
   }

   /* It seems that some GPUs (at least some GC400 have shown this behavior)
    * come out of reset with random vertex attributes enabled and also don't
    * disable them on the write to the first config register as normal. Enabling
    * all attributes seems to provide the GPU with the required edge to actually
    * disable the unused attributes on the next draw.
    */
   if (screen->info->halti >= 5) {
      etna_set_state_multi(stream, VIVS_NFE_GENERIC_ATTRIB_CONFIG0(0),
                           VIVS_NFE_GENERIC_ATTRIB__LEN, dummy_attribs);
   } else {
      etna_set_state_multi(stream, VIVS_FE_VERTEX_ELEMENT_CONFIG(0),
                           screen->info->halti >= 0 ? 16 : 12, dummy_attribs);
   }

   etna_cmd_stream_mark_end_of_context_init(stream);

   ctx->dirty = ~0L;
   ctx->dirty_sampler_views = ~0L;
   ctx->prev_active_samplers = ~0L;
}

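/* Flush the context: suspend active accumulating queries, take care of
 * implicit resource flushes (skipped for internal flushes), submit the
 * command stream, resume the queries and re-emit the baseline GPU state for
 * the next command buffer.
 */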
void
etna_flush(struct pipe_context *pctx, struct pipe_fence_handle **fence,
           enum pipe_flush_flags flags, bool internal)
{
   struct etna_context *ctx = etna_context(pctx);
   int out_fence_fd = -1;

   list_for_each_entry(struct etna_acc_query, aq, &ctx->active_acc_queries, node)
      etna_acc_query_suspend(aq, ctx);

   if (!internal) {
      /* flush all resources that need an implicit flush */
      set_foreach(ctx->flush_resources, entry) {
         struct pipe_resource *prsc = (struct pipe_resource *)entry->key;

         pctx->flush_resource(pctx, prsc);
         pipe_resource_reference(&prsc, NULL);
      }
      _mesa_set_clear(ctx->flush_resources, NULL);

      /* reset shared resources update tracking */
      set_foreach(ctx->updated_resources, entry) {
         struct pipe_resource *prsc = (struct pipe_resource *)entry->key;
         pipe_resource_reference(&prsc, NULL);
      }
      _mesa_set_clear(ctx->updated_resources, NULL);
   }

   etna_cmd_stream_flush(ctx->stream, ctx->in_fence_fd,
                         (flags & PIPE_FLUSH_FENCE_FD) ? &out_fence_fd : NULL,
                         ctx->is_noop);

   list_for_each_entry(struct etna_acc_query, aq, &ctx->active_acc_queries, node)
      etna_acc_query_resume(aq, ctx);

   if (fence)
      *fence = etna_fence_create(pctx, out_fence_fd);

   _mesa_hash_table_clear(ctx->pending_resources, NULL);

   etna_reset_gpu_state(ctx);
}

static void
etna_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fence,
                   enum pipe_flush_flags flags)
{
   etna_flush(pctx, fence, flags, false);
}

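/* Flush callback registered with the command stream; it is invoked when the
 * stream has to be submitted early (e.g. when it runs out of space). Derived
 * state is recomputed afterwards because the flush marked everything dirty.
 */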
static void
etna_context_force_flush(struct etna_cmd_stream *stream, void *priv)
{
   struct pipe_context *pctx = priv;

   etna_flush(pctx, NULL, 0, true);

   /* update derived states as the context is now fully dirty */
   etna_state_update(etna_context(pctx));
}

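/* Register a resource that needs an implicit flush at the next context
 * flush; a reference is taken the first time the resource is added.
 */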
void
etna_context_add_flush_resource(struct etna_context *ctx,
                                struct pipe_resource *rsc)
{
   bool found;

   _mesa_set_search_or_add(ctx->flush_resources, rsc, &found);

   if (!found)
      pipe_reference(NULL, &rsc->reference);
}

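/* Finish outstanding asynchronous shader compiles before swapping the debug
 * callback, as running compiler jobs may still reference the old one.
 */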
static void
etna_set_debug_callback(struct pipe_context *pctx,
                        const struct util_debug_callback *cb)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_screen *screen = ctx->screen;

   util_queue_finish(&screen->shader_compiler_queue);
   u_default_set_debug_callback(pctx, cb);
}

struct pipe_context *
etna_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
{
   struct etna_context *ctx = CALLOC_STRUCT(etna_context);
   struct etna_screen *screen;
   struct pipe_context *pctx;
   struct etna_pipe *pipe;
   bool compute_only = flags & PIPE_CONTEXT_COMPUTE_ONLY;

   if (ctx == NULL)
      return NULL;

   pctx = &ctx->base;
   pctx->priv = ctx;
   pctx->screen = pscreen;
   pctx->stream_uploader = u_upload_create_default(pctx);
   if (!pctx->stream_uploader)
      goto fail;
   pctx->const_uploader = pctx->stream_uploader;

   screen = etna_screen(pscreen);
   pipe = (compute_only && screen->pipe_nn) ? screen->pipe_nn : screen->pipe;
   ctx->stream = etna_cmd_stream_new(pipe, 0x2000,
                                     &etna_context_force_flush, pctx);
   if (ctx->stream == NULL)
      goto fail;

   ctx->pending_resources = _mesa_pointer_hash_table_create(NULL);
   if (!ctx->pending_resources)
      goto fail;

   ctx->flush_resources = _mesa_set_create(NULL, _mesa_hash_pointer,
                                           _mesa_key_pointer_equal);
   if (!ctx->flush_resources)
      goto fail;

   ctx->updated_resources = _mesa_set_create(NULL, _mesa_hash_pointer,
                                             _mesa_key_pointer_equal);
   if (!ctx->updated_resources)
      goto fail;

   /* context state setup */
   ctx->screen = screen;
   /* need some sane default in case gallium frontends don't set some state: */
   ctx->sample_mask = 0xffff;

   ctx->compute_only = compute_only;

   /* Set sensible defaults for state */
   etna_reset_gpu_state(ctx);

   ctx->in_fence_fd = -1;

   pctx->destroy = etna_context_destroy;
   pctx->draw_vbo = etna_draw_vbo;
   pctx->ml_subgraph_create = etna_ml_subgraph_create;
   pctx->ml_subgraph_invoke = etna_ml_subgraph_invoke;
   pctx->ml_subgraph_read_output = etna_ml_subgraph_read_outputs;
   pctx->ml_subgraph_destroy = etna_ml_subgraph_destroy;
   pctx->flush = etna_context_flush;
   pctx->set_debug_callback = etna_set_debug_callback;
   pctx->create_fence_fd = etna_create_fence_fd;
   pctx->fence_server_sync = etna_fence_server_sync;
   pctx->emit_string_marker = etna_emit_string_marker;
   pctx->set_frontend_noop = etna_set_frontend_noop;
   pctx->clear_buffer = u_default_clear_buffer;
   pctx->clear_texture = u_default_clear_texture;

   /* creation of compile states */
   pctx->create_blend_state = etna_blend_state_create;
   pctx->create_rasterizer_state = etna_rasterizer_state_create;
   pctx->create_depth_stencil_alpha_state = etna_zsa_state_create;

   etna_clear_blit_init(pctx);
   etna_query_context_init(pctx);
   etna_state_init(pctx);
   etna_surface_init(pctx);
   etna_shader_init(pctx);
   etna_texture_init(pctx);
   etna_transfer_init(pctx);

   if (!ctx->compute_only) {
      ctx->blitter = util_blitter_create(pctx);
      if (!ctx->blitter)
         goto fail;
   }

   slab_create_child(&ctx->transfer_pool, &screen->transfer_pool);
   list_inithead(&ctx->active_acc_queries);

   return pctx;

fail:
   pctx->destroy(pctx);

   return NULL;
}

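/* Evaluate conditional rendering on the CPU: read back the result of the
 * bound query (waiting if the condition mode requires it) and report whether
 * rendering should proceed.
 */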
bool
etna_render_condition_check(struct pipe_context *pctx)
{
   struct etna_context *ctx = etna_context(pctx);

   if (!ctx->cond_query)
      return true;

   perf_debug_ctx(ctx, "Implementing conditional rendering on the CPU");

   union pipe_query_result res = { 0 };
   bool wait =
      ctx->cond_mode != PIPE_RENDER_COND_NO_WAIT &&
      ctx->cond_mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;

   if (pctx->get_query_result(pctx, ctx->cond_query, wait, &res))
      return (bool)res.u64 != ctx->cond_cond;

   return true;
}