/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef FD5_EMIT_H
#define FD5_EMIT_H

#include "pipe/p_context.h"

#include "freedreno_context.h"
#include "fd5_context.h"
#include "fd5_format.h"
#include "fd5_program.h"
#include "ir3_shader.h"

struct fd_ringbuffer;

/* grouped together emit-state for prog/vertex/state emit: */
struct fd5_emit {
	struct pipe_debug_callback *debug;
	const struct fd_vertex_state *vtx;
	const struct fd_program_stateobj *prog;
	const struct pipe_draw_info *info;
	struct ir3_shader_key key;
	enum fd_dirty_3d_state dirty;

	uint32_t sprite_coord_enable;  /* bitmask */
	bool sprite_coord_mode;
	bool rasterflat;
	bool no_decode_srgb;

	/* in binning pass, we don't have real frag shader, so we
	 * don't know if real draw disqualifies lrz write.  So just
	 * figure that out up-front and stash it in the emit.
	 */
	bool no_lrz_write;

	/* cached to avoid repeated lookups of same variants: */
	const struct ir3_shader_variant *vp, *fp;
	/* TODO: other shader stages.. */

	unsigned streamout_mask;
};
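
/* Illustrative sketch, not part of the original header: a typical draw path
 * would fill in a fd5_emit on the stack and hand it to fd5_emit_state()
 * (declared below), roughly like:
 *
 *	struct fd5_emit emit = {
 *		.debug = &ctx->debug,
 *		.vtx = &ctx->vtx,
 *		.prog = &ctx->prog,
 *		.info = info,
 *		.key = key,
 *		.dirty = dirty,
 *	};
 *	fd5_emit_state(ctx, ring, &emit);
 *
 * The field names come from the struct above; the context members used to
 * populate them (ctx->debug, ctx->vtx, ctx->prog) and the local info/key/dirty
 * values are assumptions for illustration only.
 */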

static inline enum a5xx_color_fmt fd5_emit_format(struct pipe_surface *surf)
{
	if (!surf)
		return 0;
	return fd5_pipe2color(surf->format);
}

static inline const struct ir3_shader_variant *
fd5_emit_get_vp(struct fd5_emit *emit)
{
	if (!emit->vp) {
		struct fd5_shader_stateobj *so = emit->prog->vp;
		emit->vp = ir3_shader_variant(so->shader, emit->key, emit->debug);
	}
	return emit->vp;
}

static inline const struct ir3_shader_variant *
fd5_emit_get_fp(struct fd5_emit *emit)
{
	if (!emit->fp) {
		if (emit->key.binning_pass) {
			/* use dummy stateobj to simplify binning vs non-binning: */
			static const struct ir3_shader_variant binning_fp = {};
			emit->fp = &binning_fp;
		} else {
			struct fd5_shader_stateobj *so = emit->prog->fp;
			emit->fp = ir3_shader_variant(so->shader, emit->key, emit->debug);
		}
	}
	return emit->fp;
}
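
/* Illustrative note, not from the original header: because the getters above
 * cache the looked-up variant in emit->vp/emit->fp, state-emit code can call
 * them repeatedly within a draw without redoing the ir3_shader_variant()
 * lookup, e.g.:
 *
 *	const struct ir3_shader_variant *vp = fd5_emit_get_vp(emit);
 *	const struct ir3_shader_variant *fp = fd5_emit_get_fp(emit);
 *
 * In the binning pass the zero-initialized dummy binning_fp variant stands in
 * for the fragment shader, so callers do not need a separate code path.
 */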

static inline void
fd5_cache_flush(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	fd_reset_wfi(batch);
	OUT_PKT4(ring, REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_LO, 5);
	OUT_RING(ring, 0x00000000);   /* UCHE_CACHE_INVALIDATE_MIN_LO */
	OUT_RING(ring, 0x00000000);   /* UCHE_CACHE_INVALIDATE_MIN_HI */
	OUT_RING(ring, 0x00000000);   /* UCHE_CACHE_INVALIDATE_MAX_LO */
	OUT_RING(ring, 0x00000000);   /* UCHE_CACHE_INVALIDATE_MAX_HI */
	OUT_RING(ring, 0x00000012);   /* UCHE_CACHE_INVALIDATE */
	fd_wfi(batch, ring);
}

static inline void
fd5_set_render_mode(struct fd_context *ctx, struct fd_ringbuffer *ring,
		enum render_mode_cmd mode)
{
	/* TODO add preemption support, gmem bypass, etc */
	emit_marker5(ring, 7);
	OUT_PKT7(ring, CP_SET_RENDER_MODE, 5);
	OUT_RING(ring, CP_SET_RENDER_MODE_0_MODE(mode));
	OUT_RING(ring, 0x00000000);   /* ADDR_LO */
	OUT_RING(ring, 0x00000000);   /* ADDR_HI */
	OUT_RING(ring, COND(mode == GMEM, CP_SET_RENDER_MODE_3_GMEM_ENABLE) |
			COND(mode == BINNING, CP_SET_RENDER_MODE_3_VSC_ENABLE));
	OUT_RING(ring, 0x00000000);
	emit_marker5(ring, 7);
}

static inline void
fd5_emit_blit(struct fd_context *ctx, struct fd_ringbuffer *ring)
{
	struct fd5_context *fd5_ctx = fd5_context(ctx);

	emit_marker5(ring, 7);

	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
	OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(BLIT));
	OUT_RELOCW(ring, fd5_ctx->blit_mem, 0, 0, 0);  /* ADDR_LO/HI */
	OUT_RING(ring, 0x00000000);

	emit_marker5(ring, 7);
}

static inline void
fd5_emit_render_cntl(struct fd_context *ctx, bool blit, bool binning)
{
	struct fd_ringbuffer *ring = binning ? ctx->batch->binning : ctx->batch->draw;

	/* TODO eventually this partially depends on the pfb state, ie.
	 * which of the cbuf(s)/zsbuf has an UBWC flag buffer.. that part
	 * we could probably cache and just regenerate if framebuffer
	 * state is dirty (or something like that)..
	 *
	 * Other bits seem to depend on query state, like if samples-passed
	 * query is active.
	 */
	bool samples_passed = (fd5_context(ctx)->samples_passed_queries > 0);
	OUT_PKT4(ring, REG_A5XX_RB_RENDER_CNTL, 1);
	OUT_RING(ring, 0x00000000 |   /* RB_RENDER_CNTL */
			COND(binning, A5XX_RB_RENDER_CNTL_BINNING_PASS) |
			COND(binning, A5XX_RB_RENDER_CNTL_DISABLE_COLOR_PIPE) |
			COND(samples_passed, A5XX_RB_RENDER_CNTL_SAMPLES_PASSED) |
			COND(!blit, 0x8));

	OUT_PKT4(ring, REG_A5XX_GRAS_SC_CNTL, 1);
	OUT_RING(ring, 0x00000008 |   /* GRAS_SC_CNTL */
			COND(binning, A5XX_GRAS_SC_CNTL_BINNING_PASS) |
			COND(samples_passed, A5XX_GRAS_SC_CNTL_SAMPLES_PASSED));
}

static inline void
fd5_emit_lrz_flush(struct fd_ringbuffer *ring)
{
	/* TODO I think the extra writes to GRAS_LRZ_CNTL are probably
	 * a workaround and not needed on all a5xx.
	 */
	OUT_PKT4(ring, REG_A5XX_GRAS_LRZ_CNTL, 1);
	OUT_RING(ring, A5XX_GRAS_LRZ_CNTL_ENABLE);

	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, LRZ_FLUSH);

	OUT_PKT4(ring, REG_A5XX_GRAS_LRZ_CNTL, 1);
	OUT_RING(ring, 0x0);
}

void fd5_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd5_emit *emit);

void fd5_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
		struct fd5_emit *emit);

void fd5_emit_cs_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
		struct ir3_shader_variant *cp);

void fd5_emit_restore(struct fd_batch *batch, struct fd_ringbuffer *ring);

void fd5_emit_init(struct pipe_context *pctx);

#endif /* FD5_EMIT_H */