/*
 * Copyright (c) 2012-2015 Etnaviv Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Wladimir J. van der Laan <laanwj@gmail.com>
 *    Christian Gmeiner <christian.gmeiner@gmail.com>
 */

#include "etnaviv_context.h"

#include "etnaviv_blend.h"
#include "etnaviv_clear_blit.h"
#include "etnaviv_compiler.h"
#include "etnaviv_debug.h"
#include "etnaviv_emit.h"
#include "etnaviv_fence.h"
#include "etnaviv_ml.h"
#include "etnaviv_query.h"
#include "etnaviv_query_acc.h"
#include "etnaviv_rasterizer.h"
#include "etnaviv_resource.h"
#include "etnaviv_screen.h"
#include "etnaviv_shader.h"
#include "etnaviv_state.h"
#include "etnaviv_surface.h"
#include "etnaviv_texture.h"
#include "etnaviv_transfer.h"
#include "etnaviv_translate.h"
#include "etnaviv_zsa.h"

#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "util/hash_table.h"
#include "util/u_blitter.h"
#include "util/u_draw.h"
#include "util/u_helpers.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_prim.h"
#include "util/u_upload_mgr.h"
#include "util/u_debug_cb.h"
#include "util/u_surface.h"
#include "util/u_transfer.h"

#include "hw/common.xml.h"

static inline void
etna_emit_nop_with_data(struct etna_cmd_stream *stream, uint32_t value)
{
   etna_cmd_stream_emit(stream, VIV_FE_NOP_HEADER_OP_NOP);
   etna_cmd_stream_emit(stream, value);
}

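/* Implementation of pipe_context::emit_string_marker: pack the debug string
 * into the payload words of FE NOP commands, so it ends up in the command
 * stream (and thus in command stream dumps) without changing any GPU state.
 */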
static void
etna_emit_string_marker(struct pipe_context *pctx, const char *string, int len)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_cmd_stream *stream = ctx->stream;
   const uint32_t *buf = (const void *)string;

   etna_cmd_stream_reserve(stream, len * 2);

   while (len >= 4) {
      etna_emit_nop_with_data(stream, *buf);
      buf++;
      len -= 4;
   }

   /* copy the remaining bytes without reading past the end of the input string */
   if (len > 0) {
      uint32_t w = 0;
      memcpy(&w, buf, len);
      etna_emit_nop_with_data(stream, w);
   }
}

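/* Implementation of pipe_context::set_frontend_noop: flush pending work, then
 * toggle no-op mode, which etna_flush() passes on to etna_cmd_stream_flush()
 * (presumably so the actual submit can be skipped while in no-op mode).
 */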
static void
etna_set_frontend_noop(struct pipe_context *pctx, bool enable)
{
   struct etna_context *ctx = etna_context(pctx);

   pctx->flush(pctx, NULL, 0);
   ctx->is_noop = enable;
}

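/* Implementation of pipe_context::destroy: release the resource tracking
 * tables, framebuffer references, blitter, upload manager and command stream,
 * then the transfer pool and any imported fence fd.
 */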
static void
etna_context_destroy(struct pipe_context *pctx)
{
   struct etna_context *ctx = etna_context(pctx);

   if (ctx->pending_resources)
      _mesa_hash_table_destroy(ctx->pending_resources, NULL);

   if (ctx->updated_resources)
      _mesa_set_destroy(ctx->updated_resources, NULL);

   if (ctx->flush_resources)
      _mesa_set_destroy(ctx->flush_resources, NULL);

   util_copy_framebuffer_state(&ctx->framebuffer_s, NULL);

   if (ctx->blitter)
      util_blitter_destroy(ctx->blitter);

   if (pctx->stream_uploader)
      u_upload_destroy(pctx->stream_uploader);

   if (ctx->stream)
      etna_cmd_stream_del(ctx->stream);

   etna_texture_fini(pctx);

   slab_destroy_child(&ctx->transfer_pool);

   if (ctx->in_fence_fd != -1)
      close(ctx->in_fence_fd);

   FREE(pctx);
}

/* Update render state where needed based on draw operation */
static void
etna_update_state_for_draw(struct etna_context *ctx, const struct pipe_draw_info *info)
{
   /* Handle primitive restart:
    * - If not an indexed draw, we don't care about the state of the primitive restart bit.
    * - Otherwise, set the bit in INDEX_STREAM_CONTROL in the index buffer state
    *   accordingly
    * - If the value of the INDEX_STREAM_CONTROL register changed due to this, or
    *   primitive restart is enabled and the restart index changed, mark the index
    *   buffer state as dirty
    */

   if (info->index_size) {
      uint32_t new_control = ctx->index_buffer.FE_INDEX_STREAM_CONTROL;

      if (info->primitive_restart)
         new_control |= VIVS_FE_INDEX_STREAM_CONTROL_PRIMITIVE_RESTART;
      else
         new_control &= ~VIVS_FE_INDEX_STREAM_CONTROL_PRIMITIVE_RESTART;

      if (ctx->index_buffer.FE_INDEX_STREAM_CONTROL != new_control ||
          (info->primitive_restart && ctx->index_buffer.FE_PRIMITIVE_RESTART_INDEX != info->restart_index)) {
         ctx->index_buffer.FE_INDEX_STREAM_CONTROL = new_control;
         ctx->index_buffer.FE_PRIMITIVE_RESTART_INDEX = info->restart_index;
         ctx->dirty |= ETNA_DIRTY_INDEX_BUFFER;
      }
   }
}

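/* Look up (or compile) the vertex shader variant for the current key and
 * flag ETNA_DIRTY_SHADER if the active variant changed.
 */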
static bool
etna_get_vs(struct etna_context *ctx, struct etna_shader_key* const key)
{
   const struct etna_shader_variant *old = ctx->shader.vs;

   ctx->shader.vs = etna_shader_variant(ctx->shader.bind_vs, key, &ctx->base.debug, true);

   if (!ctx->shader.vs)
      return false;

   if (old != ctx->shader.vs)
      ctx->dirty |= ETNA_DIRTY_SHADER;

   return true;
}

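/* Same as etna_get_vs(), but for the fragment shader; on pre-HALTI2 hardware
 * the key is first extended with per-sampler swizzle and compare state so a
 * variant with lowered texture compare (shadow sampling) can be selected.
 */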
static bool
etna_get_fs(struct etna_context *ctx, struct etna_shader_key* const key)
{
   const struct etna_shader_variant *old = ctx->shader.fs;

   /* update the key if we need to run nir_lower_sample_tex_compare(..). */
   if (ctx->screen->specs.halti < 2 &&
       (ctx->dirty & (ETNA_DIRTY_SAMPLERS | ETNA_DIRTY_SAMPLER_VIEWS))) {

      for (unsigned int i = 0; i < ctx->num_fragment_sampler_views; i++) {
         if (ctx->sampler[i]->compare_mode == PIPE_TEX_COMPARE_NONE)
            continue;

         key->has_sample_tex_compare = 1;
         key->num_texture_states = ctx->num_fragment_sampler_views;

         key->tex_swizzle[i].swizzle_r = ctx->sampler_view[i]->swizzle_r;
         key->tex_swizzle[i].swizzle_g = ctx->sampler_view[i]->swizzle_g;
         key->tex_swizzle[i].swizzle_b = ctx->sampler_view[i]->swizzle_b;
         key->tex_swizzle[i].swizzle_a = ctx->sampler_view[i]->swizzle_a;

         key->tex_compare_func[i] = ctx->sampler[i]->compare_func;
      }
   }

   ctx->shader.fs = etna_shader_variant(ctx->shader.bind_fs, key, &ctx->base.debug, true);

   if (!ctx->shader.fs)
      return false;

   if (old != ctx->shader.fs)
      ctx->dirty |= ETNA_DIRTY_SHADER;

   return true;
}

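/* Implementation of pipe_context::draw_vbo: validate the draw, set up the
 * index buffer and shader variants, mark all referenced resources as read or
 * written, sync state to the command stream, and finally emit the draw
 * command itself.
 */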
static void
etna_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info,
              unsigned drawid_offset,
              const struct pipe_draw_indirect_info *indirect,
              const struct pipe_draw_start_count_bias *draws,
              unsigned num_draws)
{
   if (num_draws > 1) {
      util_draw_multi(pctx, info, drawid_offset, indirect, draws, num_draws);
      return;
   }

   if (!indirect && (!draws[0].count || !info->instance_count))
      return;

   struct etna_context *ctx = etna_context(pctx);
   struct etna_screen *screen = ctx->screen;
   struct pipe_framebuffer_state *pfb = &ctx->framebuffer_s;
   uint32_t draw_mode;
   unsigned i;

   if (!indirect &&
       !info->primitive_restart &&
       !u_trim_pipe_prim(info->mode, (unsigned*)&draws[0].count))
      return;

   if (ctx->vertex_elements == NULL || ctx->vertex_elements->num_elements == 0)
      return; /* Nothing to do */

   if (unlikely(ctx->rasterizer->cull_face == PIPE_FACE_FRONT_AND_BACK &&
                u_decomposed_prim(info->mode) == MESA_PRIM_TRIANGLES))
      return;

   if (!etna_render_condition_check(pctx))
      return;

   int prims = u_decomposed_prims_for_vertices(info->mode, draws[0].count);
   if (unlikely(prims <= 0)) {
      DBG("Invalid draw primitive mode=%i or no primitives to be drawn", info->mode);
      return;
   }

   draw_mode = translate_draw_mode(info->mode);
   if (draw_mode == ETNA_NO_MATCH) {
      BUG("Unsupported draw mode");
      return;
   }

   /* Upload a user index buffer. */
   unsigned index_offset = 0;
   struct pipe_resource *indexbuf = NULL;

   if (info->index_size) {
      indexbuf = info->has_user_indices ? NULL : info->index.resource;
      if (info->has_user_indices &&
          !util_upload_index_buffer(pctx, info, &draws[0], &indexbuf, &index_offset, 4)) {
         BUG("Index buffer upload failed.");
         return;
      }
      /* Add start to index offset, when rendering indexed */
      index_offset += draws[0].start * info->index_size;

      ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.bo = etna_resource(indexbuf)->bo;
      ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.offset = index_offset;
      ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.flags = ETNA_RELOC_READ;
      ctx->index_buffer.FE_INDEX_STREAM_CONTROL = translate_index_size(info->index_size);

      if (!ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.bo) {
         BUG("Unsupported or no index buffer");
         return;
      }
   } else {
      ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.bo = 0;
      ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.offset = 0;
      ctx->index_buffer.FE_INDEX_STREAM_BASE_ADDR.flags = 0;
      ctx->index_buffer.FE_INDEX_STREAM_CONTROL = 0;
   }
   ctx->dirty |= ETNA_DIRTY_INDEX_BUFFER;

   struct etna_shader_key key = {
      .front_ccw = ctx->rasterizer->front_ccw,
      .sprite_coord_enable = ctx->rasterizer->sprite_coord_enable,
      .sprite_coord_yinvert = !!ctx->rasterizer->sprite_coord_mode,
   };

   if (pfb->cbufs[0])
      key.frag_rb_swap = !!translate_pe_format_rb_swap(pfb->cbufs[0]->format);

   if (!etna_get_vs(ctx, &key) || !etna_get_fs(ctx, &key)) {
      BUG("compiled shaders are not okay");
      return;
   }

   /* Update any derived state */
   if (!etna_state_update(ctx))
      return;

   /*
    * Figure out the buffers/features we need:
    */
   if (ctx->dirty & ETNA_DIRTY_ZSA) {
      if (etna_depth_enabled(ctx))
         resource_written(ctx, pfb->zsbuf->texture);

      if (etna_stencil_enabled(ctx))
         resource_written(ctx, pfb->zsbuf->texture);
   }

   if (ctx->dirty & ETNA_DIRTY_FRAMEBUFFER) {
      for (i = 0; i < pfb->nr_cbufs; i++) {
         struct pipe_resource *surf;

         if (!pfb->cbufs[i])
            continue;

         surf = pfb->cbufs[i]->texture;
         resource_written(ctx, surf);
      }
   }

   if (ctx->dirty & ETNA_DIRTY_SHADER) {
      /* Mark constant buffers as being read */
      u_foreach_bit(i, ctx->constant_buffer[PIPE_SHADER_VERTEX].enabled_mask)
         resource_read(ctx, ctx->constant_buffer[PIPE_SHADER_VERTEX].cb[i].buffer);

      u_foreach_bit(i, ctx->constant_buffer[PIPE_SHADER_FRAGMENT].enabled_mask)
         resource_read(ctx, ctx->constant_buffer[PIPE_SHADER_FRAGMENT].cb[i].buffer);
   }

   if (ctx->dirty & ETNA_DIRTY_VERTEX_BUFFERS) {
      /* Mark VBOs as being read */
      u_foreach_bit(i, ctx->vertex_buffer.enabled_mask) {
         assert(!ctx->vertex_buffer.vb[i].is_user_buffer);
         resource_read(ctx, ctx->vertex_buffer.vb[i].buffer.resource);
      }
   }

   if (ctx->dirty & ETNA_DIRTY_INDEX_BUFFER) {
      /* Mark index buffer as being read */
      resource_read(ctx, indexbuf);
   }

   /* Mark textures as being read */
   for (i = 0; i < PIPE_MAX_SAMPLERS; i++) {
      if (ctx->sampler_view[i]) {
         if (ctx->dirty & ETNA_DIRTY_SAMPLER_VIEWS)
             resource_read(ctx, ctx->sampler_view[i]->texture);

         /* if texture was modified since the last update,
          * we need to clear the texture cache and possibly
          * resolve/update ts
          */
         etna_update_sampler_source(ctx->sampler_view[i], i);
      }
   }

   ctx->stats.prims_generated += u_reduced_prims_for_vertices(info->mode, draws[0].count);
   ctx->stats.draw_calls++;

   /* Update state for this draw operation */
   etna_update_state_for_draw(ctx, info);

   /* First, sync state, then emit DRAW_PRIMITIVES or DRAW_INDEXED_PRIMITIVES */
   etna_emit_state(ctx);

   if (!VIV_FEATURE(screen, chipMinorFeatures6, NEW_GPIPE)) {
      switch (draw_mode) {
      case PRIMITIVE_TYPE_LINE_LOOP:
      case PRIMITIVE_TYPE_LINE_STRIP:
      case PRIMITIVE_TYPE_TRIANGLE_STRIP:
      case PRIMITIVE_TYPE_TRIANGLE_FAN:
         etna_set_state(ctx->stream, VIVS_GL_VERTEX_ELEMENT_CONFIG,
                        VIVS_GL_VERTEX_ELEMENT_CONFIG_UNK0 |
                        VIVS_GL_VERTEX_ELEMENT_CONFIG_REUSE);
         break;
      default:
         etna_set_state(ctx->stream, VIVS_GL_VERTEX_ELEMENT_CONFIG,
                        VIVS_GL_VERTEX_ELEMENT_CONFIG_UNK0);
         break;
      }
   }

   if (screen->specs.halti >= 2) {
      /* On HALTI2+ (GC3000 and higher) only use instanced drawing commands, as the blob does */
      etna_draw_instanced(ctx->stream, info->index_size, draw_mode, info->instance_count,
         draws[0].count, info->index_size ? draws->index_bias : draws[0].start);
   } else {
      if (info->index_size)
         etna_draw_indexed_primitives(ctx->stream, draw_mode, 0, prims, draws->index_bias);
      else
         etna_draw_primitives(ctx->stream, draw_mode, draws[0].start, prims);
   }

   if (DBG_ENABLED(ETNA_DBG_DRAW_STALL)) {
      /* Stall the FE after every draw operation.  This allows better
       * debug of GPU hang conditions, as the FE will indicate which
       * draw op has caused the hang. */
      etna_stall(ctx->stream, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
   }

   if (DBG_ENABLED(ETNA_DBG_FLUSH_ALL))
      pctx->flush(pctx, NULL, 0);

   if (ctx->framebuffer_s.cbufs[0])
      etna_resource_level_mark_changed(etna_surface(ctx->framebuffer_s.cbufs[0])->level);
   if (ctx->framebuffer_s.zsbuf)
      etna_resource_level_mark_changed(etna_surface(ctx->framebuffer_s.zsbuf)->level);
   if (info->index_size && indexbuf != info->index.resource)
      pipe_resource_reference(&indexbuf, NULL);
}

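/* Bring the GPU to a known baseline state. This runs at context creation and
 * again at the end of every etna_flush(), presumably because each new command
 * stream starts without any state carried over from the previous one.
 */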
static void
etna_reset_gpu_state(struct etna_context *ctx)
{
   struct etna_cmd_stream *stream = ctx->stream;
   struct etna_screen *screen = ctx->screen;
   uint32_t dummy_attribs[VIVS_NFE_GENERIC_ATTRIB__LEN] = { 0 };

   if (ctx->compute_only) {
      /* compute only context does not make use of any of the dirty state tracking. */
      assert(ctx->dirty == 0);
      assert(ctx->dirty_sampler_views == 0);
      assert(ctx->prev_active_samplers == 0);

      etna_cmd_stream_mark_end_of_context_init(stream);

      return;
   }

   etna_set_state(stream, VIVS_GL_API_MODE, VIVS_GL_API_MODE_OPENGL);
   etna_set_state(stream, VIVS_PA_W_CLIP_LIMIT, 0x34000001);
   etna_set_state(stream, VIVS_PA_FLAGS, 0x00000000); /* blob sets ZCONVERT_BYPASS on GC3000+, this messes up z for us */
   etna_set_state(stream, VIVS_PA_VIEWPORT_UNK00A80, 0x38a01404);
   etna_set_state(stream, VIVS_PA_VIEWPORT_UNK00A84, fui(8192.0));
   etna_set_state(stream, VIVS_PA_ZFARCLIPPING, 0x00000000);
   etna_set_state(stream, VIVS_RA_HDEPTH_CONTROL, 0x00007000);
   etna_set_state(stream, VIVS_PS_CONTROL_EXT, 0x00000000);

   /* There is no HALTI0 specific state */
   if (screen->specs.halti >= 1) { /* Only on HALTI1+ */
      etna_set_state(stream, VIVS_VS_HALTI1_UNK00884, 0x00000808);
   }
   if (screen->specs.halti >= 2) { /* Only on HALTI2+ */
      etna_set_state(stream, VIVS_RA_UNK00E0C, 0x00000000);
   }
   if (screen->specs.halti >= 3) { /* Only on HALTI3+ */
      etna_set_state(stream, VIVS_PS_HALTI3_UNK0103C, 0x76543210);
   }
   if (screen->specs.halti >= 4) { /* Only on HALTI4+ */
      etna_set_state(stream, VIVS_PS_MSAA_CONFIG, 0x6fffffff & 0xf70fffff & 0xfff6ffff &
                                                  0xffff6fff & 0xfffff6ff & 0xffffff7f);
      etna_set_state(stream, VIVS_PE_HALTI4_UNK014C0, 0x00000000);
   }
   if (screen->specs.halti >= 5) { /* Only on HALTI5+ */
      etna_set_state(stream, VIVS_NTE_DESCRIPTOR_UNK14C40, 0x00000001);
      etna_set_state(stream, VIVS_FE_HALTI5_UNK007D8, 0x00000002);
      etna_set_state(stream, VIVS_PS_SAMPLER_BASE, 0x00000000);
      etna_set_state(stream, VIVS_VS_SAMPLER_BASE, 0x00000020);
      etna_set_state(stream, VIVS_SH_CONFIG, VIVS_SH_CONFIG_RTNE_ROUNDING);
   } else { /* Only on pre-HALTI5 */
      etna_set_state(stream, VIVS_GL_UNK03838, 0x00000000);
      etna_set_state(stream, VIVS_GL_UNK03854, 0x00000000);
   }

   if (VIV_FEATURE(screen, chipMinorFeatures4, BUG_FIXES18))
      etna_set_state(stream, VIVS_GL_BUG_FIXES, 0x6);

   if (!screen->specs.use_blt) {
      /* Enable SINGLE_BUFFER for resolve, if supported */
      etna_set_state(stream, VIVS_RS_SINGLE_BUFFER, COND(screen->specs.single_buffer, VIVS_RS_SINGLE_BUFFER_ENABLE));
   }

   if (screen->specs.halti >= 5) {
      /* TXDESC cache flush - do this once at the beginning, as texture
       * descriptors are only written by the CPU once, then patched by the kernel
       * before command stream submission. It does not need flushing if the
       * referenced image data changes.
       */
      etna_set_state(stream, VIVS_NTE_DESCRIPTOR_FLUSH, 0);
      etna_set_state(stream, VIVS_GL_FLUSH_CACHE,
            VIVS_GL_FLUSH_CACHE_DESCRIPTOR_UNK12 |
            VIVS_GL_FLUSH_CACHE_DESCRIPTOR_UNK13);

      /* Icache invalidate (should do this on shader change?) */
      etna_set_state(stream, VIVS_VS_ICACHE_INVALIDATE,
            VIVS_VS_ICACHE_INVALIDATE_UNK0 | VIVS_VS_ICACHE_INVALIDATE_UNK1 |
            VIVS_VS_ICACHE_INVALIDATE_UNK2 | VIVS_VS_ICACHE_INVALIDATE_UNK3 |
            VIVS_VS_ICACHE_INVALIDATE_UNK4);
   }

   /* It seems that some GPUs (at least some GC400 have shown this behavior)
    * come out of reset with random vertex attributes enabled and also don't
    * disable them on the write to the first config register as normal. Enabling
    * all attributes seems to provide the GPU with the required edge to actually
    * disable the unused attributes on the next draw.
    */
   if (screen->specs.halti >= 5) {
      etna_set_state_multi(stream, VIVS_NFE_GENERIC_ATTRIB_CONFIG0(0),
                           VIVS_NFE_GENERIC_ATTRIB__LEN, dummy_attribs);
   } else {
      etna_set_state_multi(stream, VIVS_FE_VERTEX_ELEMENT_CONFIG(0),
                           screen->specs.halti >= 0 ? 16 : 12, dummy_attribs);
   }

   etna_cmd_stream_mark_end_of_context_init(stream);

   ctx->dirty = ~0L;
   ctx->dirty_sampler_views = ~0L;
   ctx->prev_active_samplers = ~0L;
}

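/* Flush the context's command stream to the kernel. Accumulating queries are
 * suspended around the flush, implicit-flush resources are handled for
 * non-internal callers, a fence is created if requested, and the baseline GPU
 * state is re-emitted for the next command stream.
 */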
void
etna_flush(struct pipe_context *pctx, struct pipe_fence_handle **fence,
           enum pipe_flush_flags flags, bool internal)
{
   struct etna_context *ctx = etna_context(pctx);
   int out_fence_fd = -1;

   list_for_each_entry(struct etna_acc_query, aq, &ctx->active_acc_queries, node)
      etna_acc_query_suspend(aq, ctx);

   if (!internal) {
      /* flush all resources that need an implicit flush */
      set_foreach(ctx->flush_resources, entry) {
         struct pipe_resource *prsc = (struct pipe_resource *)entry->key;

         pctx->flush_resource(pctx, prsc);
         pipe_resource_reference(&prsc, NULL);
      }
      _mesa_set_clear(ctx->flush_resources, NULL);

      /* reset shared resources update tracking */
      set_foreach(ctx->updated_resources, entry) {
         struct pipe_resource *prsc = (struct pipe_resource *)entry->key;
         pipe_resource_reference(&prsc, NULL);
      }
      _mesa_set_clear(ctx->updated_resources, NULL);
   }

   etna_cmd_stream_flush(ctx->stream, ctx->in_fence_fd,
                          (flags & PIPE_FLUSH_FENCE_FD) ? &out_fence_fd : NULL,
                          ctx->is_noop);

   list_for_each_entry(struct etna_acc_query, aq, &ctx->active_acc_queries, node)
      etna_acc_query_resume(aq, ctx);

   if (fence)
      *fence = etna_fence_create(pctx, out_fence_fd);

   _mesa_hash_table_clear(ctx->pending_resources, NULL);

   etna_reset_gpu_state(ctx);
}

static void
etna_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fence,
                   enum pipe_flush_flags flags)
{
   etna_flush(pctx, fence, flags, false);
}

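/* Forced-flush callback registered with etna_cmd_stream_new(): flush
 * internally, then recompute derived state, since the flush leaves the whole
 * context marked dirty.
 */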
static void
etna_context_force_flush(struct etna_cmd_stream *stream, void *priv)
{
   struct pipe_context *pctx = priv;

   etna_flush(pctx, NULL, 0, true);

   /* update derived states as the context is now fully dirty */
   etna_state_update(etna_context(pctx));
}

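/* Track a resource that needs an implicit flush_resource() at the next
 * non-internal context flush, taking a reference the first time it is added.
 */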
void
etna_context_add_flush_resource(struct etna_context *ctx,
                                struct pipe_resource *rsc)
{
   bool found;

   _mesa_set_search_or_add(ctx->flush_resources, rsc, &found);

   if (!found)
      pipe_reference(NULL, &rsc->reference);
}

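/* Wrap the default debug-callback setter, draining the asynchronous shader
 * compiler queue first (presumably so in-flight compiles do not report
 * through a stale callback).
 */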
static void
etna_set_debug_callback(struct pipe_context *pctx,
                        const struct util_debug_callback *cb)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_screen *screen = ctx->screen;

   util_queue_finish(&screen->shader_compiler_queue);
   u_default_set_debug_callback(pctx, cb);
}

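/* Create a new context: allocate the command stream, resource-tracking
 * tables and uploaders, emit the initial GPU state and hook up all
 * pipe_context entry points.
 */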
struct pipe_context *
etna_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
{
   struct etna_context *ctx = CALLOC_STRUCT(etna_context);
   struct etna_screen *screen;
   struct pipe_context *pctx;

   if (ctx == NULL)
      return NULL;

   pctx = &ctx->base;
   pctx->priv = ctx;
   pctx->screen = pscreen;
   pctx->stream_uploader = u_upload_create_default(pctx);
   if (!pctx->stream_uploader)
      goto fail;
   pctx->const_uploader = pctx->stream_uploader;

   screen = etna_screen(pscreen);
   ctx->stream = etna_cmd_stream_new(screen->pipe, 0x2000,
                                     &etna_context_force_flush, pctx);
   if (ctx->stream == NULL)
      goto fail;

   ctx->pending_resources = _mesa_pointer_hash_table_create(NULL);
   if (!ctx->pending_resources)
      goto fail;

   ctx->flush_resources = _mesa_set_create(NULL, _mesa_hash_pointer,
                                           _mesa_key_pointer_equal);
   if (!ctx->flush_resources)
      goto fail;

   ctx->updated_resources = _mesa_set_create(NULL, _mesa_hash_pointer,
                                             _mesa_key_pointer_equal);
   if (!ctx->updated_resources)
      goto fail;

   /* context state setup */
   ctx->screen = screen;
   /* need some sane default in case gallium frontends don't set some state: */
   ctx->sample_mask = 0xffff;

   ctx->compute_only = flags & PIPE_CONTEXT_COMPUTE_ONLY;

   /* Set sensible defaults for state */
   etna_reset_gpu_state(ctx);

   ctx->in_fence_fd = -1;

   pctx->destroy = etna_context_destroy;
   pctx->draw_vbo = etna_draw_vbo;
   pctx->ml_subgraph_create = etna_ml_subgraph_create;
   pctx->ml_subgraph_invoke = etna_ml_subgraph_invoke;
   pctx->ml_subgraph_read_output = etna_ml_subgraph_read_outputs;
   pctx->ml_subgraph_destroy = etna_ml_subgraph_destroy;
   pctx->flush = etna_context_flush;
   pctx->set_debug_callback = etna_set_debug_callback;
   pctx->create_fence_fd = etna_create_fence_fd;
   pctx->fence_server_sync = etna_fence_server_sync;
   pctx->emit_string_marker = etna_emit_string_marker;
   pctx->set_frontend_noop = etna_set_frontend_noop;
   pctx->clear_buffer = u_default_clear_buffer;
   pctx->clear_texture = u_default_clear_texture;

   /* creation of compile states */
   pctx->create_blend_state = etna_blend_state_create;
   pctx->create_rasterizer_state = etna_rasterizer_state_create;
   pctx->create_depth_stencil_alpha_state = etna_zsa_state_create;

   etna_clear_blit_init(pctx);
   etna_query_context_init(pctx);
   etna_state_init(pctx);
   etna_surface_init(pctx);
   etna_shader_init(pctx);
   etna_texture_init(pctx);
   etna_transfer_init(pctx);

   ctx->blitter = util_blitter_create(pctx);
   if (!ctx->blitter)
      goto fail;

   slab_create_child(&ctx->transfer_pool, &screen->transfer_pool);
   list_inithead(&ctx->active_acc_queries);

   return pctx;

fail:
   pctx->destroy(pctx);

   return NULL;
}

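/* Evaluate the active conditional-render query on the CPU and report whether
 * the current draw or clear should proceed.
 */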
bool
etna_render_condition_check(struct pipe_context *pctx)
{
   struct etna_context *ctx = etna_context(pctx);

   if (!ctx->cond_query)
      return true;

   perf_debug_ctx(ctx, "Implementing conditional rendering on the CPU");

   union pipe_query_result res = { 0 };
   bool wait =
      ctx->cond_mode != PIPE_RENDER_COND_NO_WAIT &&
      ctx->cond_mode != PIPE_RENDER_COND_BY_REGION_NO_WAIT;

   if (pctx->get_query_result(pctx, ctx->cond_query, wait, &res))
      return (bool)res.u64 != ctx->cond_cond;

   return true;
}